Dataset schema (one record per source file):

    repo_name : string, 6-112 characters
    path      : string, 4-204 characters
    copies    : string, 1-3 characters
    size      : string, 4-6 characters
    content   : string, 714-810k characters
    license   : string class, 15 distinct values
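Each record below lists repo_name, path, copies and size, followed by the file content and its license. A minimal sketch of iterating over such a dump, assuming it has been exported to a JSON-lines file; the file name code_dump.jsonl and the export itself are hypothetical:

import json

# Hypothetical export of the records below: one JSON object per row with the
# keys repo_name, path, copies, size, content and license.
with open("code_dump.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["license"], row["repo_name"], row["path"], len(row["content"]))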
repo_name: jorge2703/scikit-learn | path: examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | copies: 218 | size: 3893
""" ============================================== Feature agglomeration vs. univariate selection ============================================== This example compares 2 dimensionality reduction strategies: - univariate feature selection with Anova - feature agglomeration with Ward hierarchical clustering Both methods are compared in a regression problem using a BayesianRidge as supervised estimator. """ # Author: Alexandre Gramfort <[email protected]> # License: BSD 3 clause print(__doc__) import shutil import tempfile import numpy as np import matplotlib.pyplot as plt from scipy import linalg, ndimage from sklearn.feature_extraction.image import grid_to_graph from sklearn import feature_selection from sklearn.cluster import FeatureAgglomeration from sklearn.linear_model import BayesianRidge from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.externals.joblib import Memory from sklearn.cross_validation import KFold ############################################################################### # Generate data n_samples = 200 size = 40 # image size roi_size = 15 snr = 5. np.random.seed(0) mask = np.ones([size, size], dtype=np.bool) coef = np.zeros((size, size)) coef[0:roi_size, 0:roi_size] = -1. coef[-roi_size:, -roi_size:] = 1. X = np.random.randn(n_samples, size ** 2) for x in X: # smooth data x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel() X -= X.mean(axis=0) X /= X.std(axis=0) y = np.dot(X, coef.ravel()) noise = np.random.randn(y.shape[0]) noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2) y += noise_coef * noise # add noise ############################################################################### # Compute the coefs of a Bayesian Ridge with GridSearch cv = KFold(len(y), 2) # cross-validation generator for model selection ridge = BayesianRidge() cachedir = tempfile.mkdtemp() mem = Memory(cachedir=cachedir, verbose=1) # Ward agglomeration followed by BayesianRidge connectivity = grid_to_graph(n_x=size, n_y=size) ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem) clf = Pipeline([('ward', ward), ('ridge', ridge)]) # Select the optimal number of parcels with grid search clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_agglomeration_ = coef_.reshape(size, size) # Anova univariate feature selection followed by BayesianRidge f_regression = mem.cache(feature_selection.f_regression) # caching function anova = feature_selection.SelectPercentile(f_regression) clf = Pipeline([('anova', anova), ('ridge', ridge)]) # Select the optimal percentage of features with grid search clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_selection_ = coef_.reshape(size, size) ############################################################################### # Inverse the transformation to plot the results on an image plt.close('all') plt.figure(figsize=(7.3, 2.7)) plt.subplot(1, 3, 1) plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("True weights") plt.subplot(1, 3, 2) plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Selection") plt.subplot(1, 3, 3) 
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Agglomeration") plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26) plt.show() # Attempt to remove the temporary cachedir, but don't worry if it fails shutil.rmtree(cachedir, ignore_errors=True)
license: bsd-3-clause
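The example above imports GridSearchCV and KFold from sklearn.grid_search and sklearn.cross_validation, and Memory from sklearn.externals.joblib; all three locations were deprecated and later removed. A minimal sketch of the same Ward-agglomeration pipeline against the current API, assuming a recent scikit-learn and a standalone joblib install:

import numpy as np
from joblib import Memory
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.pipeline import Pipeline

size = 40
# Random placeholder data standing in for the smoothed image features above.
X = np.random.randn(200, size ** 2)
y = np.random.randn(200)

connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
                            memory=Memory(location=None))
pipe = Pipeline([('ward', ward), ('ridge', BayesianRidge())])
search = GridSearchCV(pipe, {'ward__n_clusters': [10, 20, 30]},
                      cv=KFold(n_splits=2))
search.fit(X, y)
print(search.best_params_)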
repo_name: glennq/scikit-learn | path: sklearn/datasets/tests/test_rcv1.py | copies: 322 | size: 2414
"""Test the rcv1 loader. Skipped if rcv1 is not already downloaded to data_home. """ import errno import scipy.sparse as sp import numpy as np from sklearn.datasets import fetch_rcv1 from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest def test_fetch_rcv1(): try: data1 = fetch_rcv1(shuffle=False, download_if_missing=False) except IOError as e: if e.errno == errno.ENOENT: raise SkipTest("Download RCV1 dataset to run this test.") X1, Y1 = data1.data, data1.target cat_list, s1 = data1.target_names.tolist(), data1.sample_id # test sparsity assert_true(sp.issparse(X1)) assert_true(sp.issparse(Y1)) assert_equal(60915113, X1.data.size) assert_equal(2606875, Y1.data.size) # test shapes assert_equal((804414, 47236), X1.shape) assert_equal((804414, 103), Y1.shape) assert_equal((804414,), s1.shape) assert_equal(103, len(cat_list)) # test ordering of categories first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151'] assert_array_equal(first_categories, cat_list[:6]) # test number of sample for some categories some_categories = ('GMIL', 'E143', 'CCAT') number_non_zero_in_cat = (5, 1206, 381327) for num, cat in zip(number_non_zero_in_cat, some_categories): j = cat_list.index(cat) assert_equal(num, Y1[:, j].data.size) # test shuffling and subset data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77, download_if_missing=False) X2, Y2 = data2.data, data2.target s2 = data2.sample_id # The first 23149 samples are the training samples assert_array_equal(np.sort(s1[:23149]), np.sort(s2)) # test some precise values some_sample_ids = (2286, 3274, 14042) for sample_id in some_sample_ids: idx1 = s1.tolist().index(sample_id) idx2 = s2.tolist().index(sample_id) feature_values_1 = X1[idx1, :].toarray() feature_values_2 = X2[idx2, :].toarray() assert_almost_equal(feature_values_1, feature_values_2) target_values_1 = Y1[idx1, :].toarray() target_values_2 = Y2[idx2, :].toarray() assert_almost_equal(target_values_1, target_values_2)
license: bsd-3-clause
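The loader exercised by this test, fetch_rcv1, is part of the public sklearn.datasets API. A minimal usage sketch; the first call downloads the dataset to data_home, later calls reuse the cached copy:

from sklearn.datasets import fetch_rcv1

# Shuffled training subset, matching the parameters used in the test above.
rcv1 = fetch_rcv1(subset="train", shuffle=True, random_state=77)
print(rcv1.data.shape)     # (23149, 47236) sparse TF-IDF features
print(rcv1.target.shape)   # (23149, 103) sparse topic indicator matrix
print(rcv1.target_names[:5])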
repo_name: fclesio/learning-space | path: Lightning Talk @Movile - ML with Scikit-Learn/Recipes/kmeans.py | copies: 1 | size: 2122
# Link: http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_iris.html#example-cluster-plot-cluster-iris-py

print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from sklearn.cluster import KMeans
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    plt.cla()
    est.fit(X)
    labels = est.labels_

    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()

for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))

# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
license: gpl-2.0
repo_name: mbr0wn/gnuradio | path: gr-fec/python/fec/polar/channel_construction_awgn.py | copies: 4 | size: 7916
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.

[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''

from scipy.optimize import fsolve
from scipy.special import erfc
from .helper_functions import *
from .channel_construction_bec import bhattacharyya_bounds


def solver_equation(val, s):
    cw_lambda = codeword_lambda_callable(s)
    ic_lambda = instantanious_capacity_callable()
    return lambda y: ic_lambda(cw_lambda(y)) - val


def solve_capacity(a, s):
    eq = solver_equation(a, s)
    res = fsolve(eq, 1)
    return np.abs(res[0])  # only positive values needed.


def codeword_lambda_callable(s):
    return lambda y: np.exp(-2 * y * np.sqrt(2 * s))


def codeword_lambda(y, s):
    return codeword_lambda_callable(s)(y)


def instantanious_capacity_callable():
    return lambda x: 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))


def instantanious_capacity(x):
    return instantanious_capacity_callable()(x)


def q_function(x):
    # Q(x) = (1 / sqrt(2 * pi) ) * integral (x to inf) exp(- x ^ 2 / 2) dx
    return .5 * erfc(x / np.sqrt(2))


def discretize_awgn(mu, design_snr):
    '''
    needed for Binary-AWGN channels.
    in [1] described in Section VI
    in [2] described as a function of the same name.
    in both cases reduce infinite output alphabet to a finite output alphabet of a given channel.
    idea:
    1. instantaneous capacity C(x) in interval [0, 1]
    2. split into mu intervals.
    3. find corresponding output alphabet values y of likelihood ratio function lambda(y) inserted into C(x)
    4. Calculate probability for each value given that a '0' or '1' was transmitted.
    '''
    s = 10 ** (design_snr / 10)
    a = np.zeros(mu + 1, dtype=float)
    a[-1] = np.inf
    for i in range(1, mu):
        a[i] = solve_capacity(1. * i / mu, s)

    factor = np.sqrt(2 * s)
    tpm = np.zeros((2, mu))
    for j in range(mu):
        tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
        tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
    tpm = tpm[::-1]
    tpm[0] = tpm[0][::-1]
    tpm[1] = tpm[1][::-1]
    return tpm


def instant_capacity_delta_callable():
    return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)


def capacity_delta_callable():
    c = instant_capacity_delta_callable()
    return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)


def quantize_to_size(tpm, mu):
    # This is a degrading merge, compare [1]
    calculate_delta_I = capacity_delta_callable()
    L = np.shape(tpm)[1]
    if not mu < L:
        print('WARNING: This channel gets too small!')

    # lambda works on vectors just fine. Use Numpy vector awesomeness.
    delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])

    for i in range(L - mu):
        d = np.argmin(delta_i_vec)
        ap = tpm[0, d] + tpm[0, d + 1]
        bp = tpm[1, d] + tpm[1, d + 1]
        if d > 0:
            delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
        if d < delta_i_vec.size - 1:
            delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])

        delta_i_vec = np.delete(delta_i_vec, d)
        tpm = np.delete(tpm, d, axis=1)
        tpm[0, d] = ap
        tpm[1, d] = bp
    return tpm


def upper_bound_z_params(z, block_size, design_snr):
    upper_bound = bhattacharyya_bounds(design_snr, block_size)
    z = np.minimum(z, upper_bound)
    return z


def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
    mu = mu // 2  # make sure algorithm uses only as many bins as specified.
    block_power = power_of_2_int(block_size)
    channels = np.zeros((block_size, 2, mu))
    channels[0] = discretize_awgn(mu, design_snr) * 2

    print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2}'.format(block_size, design_snr, 2 * mu))
    show_progress_bar(0, block_size)
    for j in range(0, block_power):
        u = 2 ** j
        for t in range(u):
            show_progress_bar(u + t, block_size)
            # print("(u={0}, t={1}) = {2}".format(u, t, u + t))
            ch1 = upper_convolve(channels[t], mu)
            ch2 = lower_convolve(channels[t], mu)
            channels[t] = quantize_to_size(ch1, mu)
            channels[u + t] = quantize_to_size(ch2, mu)

    z = np.zeros(block_size)
    for i in range(block_size):
        z[i] = bhattacharyya_parameter(channels[i])

    z = z[bit_reverse_vector(np.arange(block_size), block_power)]
    z = upper_bound_z_params(z, block_size, design_snr)
    show_progress_bar(block_size, block_size)
    print('')
    print('channel construction DONE')
    return z


def merge_lr_based(q, mu):
    lrs = q[0] / q[1]
    vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
    # compare [1] (20). Ordering of representatives according to LRs.
    temp = np.zeros((2, len(indices)), dtype=float)
    if vals.size < mu:
        return q
    for i in range(len(indices)):
        merge_pos = np.where(inv_indices == i)[0]
        sum_items = q[:, merge_pos]
        if merge_pos.size > 1:
            sum_items = np.sum(q[:, merge_pos], axis=1)
        temp[0, i] = sum_items[0]
        temp[1, i] = sum_items[1]
    return temp


def upper_convolve(tpm, mu):
    q = np.zeros((2, mu ** 2))
    idx = -1
    for i in range(mu):
        idx += 1
        q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
        q[1, idx] = tpm[0, i] * tpm[1, i]
        for j in range(i + 1, mu):
            idx += 1
            q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
            q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
            if q[0, idx] < q[1, idx]:
                q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
    idx += 1
    q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
    q = merge_lr_based(q, mu)
    q = normalize_q(q, tpm)
    return q


def lower_convolve(tpm, mu):
    q = np.zeros((2, mu * (mu + 1)))
    idx = -1
    for i in range(0, mu):
        idx += 1
        q[0, idx] = (tpm[0, i] ** 2) / 2
        q[1, idx] = (tpm[1, i] ** 2) / 2
        if q[0, idx] < q[1, idx]:
            q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
        idx += 1
        q[0, idx] = tpm[0, i] * tpm[1, i]
        q[1, idx] = q[0, idx]
        for j in range(i + 1, mu):
            idx += 1
            q[0, idx] = tpm[0, i] * tpm[0, j]
            q[1, idx] = tpm[1, i] * tpm[1, j]
            if q[0, idx] < q[1, idx]:
                q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
            idx += 1
            q[0, idx] = tpm[0, i] * tpm[1, j]
            q[1, idx] = tpm[1, i] * tpm[0, j]
            if q[0, idx] < q[1, idx]:
                q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
    idx += 1
    q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
    q = merge_lr_based(q, mu)
    q = normalize_q(q, tpm)
    return q


def swap_values(first, second):
    return second, first


def normalize_q(q, tpm):
    original_factor = np.sum(tpm)
    next_factor = np.sum(q)
    factor = original_factor / next_factor
    return q * factor


def main():
    print('channel construction AWGN main')
    n = 8
    m = 2 ** n
    design_snr = 0.0
    mu = 16

    z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
    print(z_params)

    if 0:
        import matplotlib.pyplot as plt
        plt.plot(z_params)
        plt.show()


if __name__ == '__main__':
    main()
license: gpl-3.0
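The construction above leans on the Gaussian tail probability Q(x), expressed through scipy.special.erfc. A short, self-contained check of that identity, independent of the GNU Radio helpers imported above:

import numpy as np
from scipy.special import erfc
from scipy.integrate import quad


def q_function(x):
    # Q(x) = P(N(0, 1) > x), written via the complementary error function.
    return 0.5 * erfc(x / np.sqrt(2))


# Compare against direct numerical integration of the standard normal tail.
for x in (0.0, 1.0, 2.5):
    tail, _ = quad(lambda t: np.exp(-t ** 2 / 2) / np.sqrt(2 * np.pi), x, np.inf)
    print(x, q_function(x), tail)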
repo_name: lthurlow/Network-Grapher | path: proj/external/matplotlib-1.2.1/doc/mpl_examples/axes_grid/demo_curvelinear_grid2.py | copies: 15 | size: 1839
import numpy as np
#from matplotlib.path import Path

import matplotlib.pyplot as plt

from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot

import mpl_toolkits.axes_grid.angle_helper as angle_helper


def curvelinear_test1(fig):
    """
    grid for custom transform.
    """

    def tr(x, y):
        sgn = np.sign(x)
        x, y = np.abs(np.asarray(x)), np.asarray(y)
        return sgn*x**.5, y

    def inv_tr(x, y):
        sgn = np.sign(x)
        x, y = np.asarray(x), np.asarray(y)
        return sgn*x**2, y

    extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                     lon_cycle = None,
                                                     lat_cycle = None,
                                                     lon_minmax = None, #(0, np.inf),
                                                     lat_minmax = None,
                                                     )

    grid_helper = GridHelperCurveLinear((tr, inv_tr),
                                        extreme_finder=extreme_finder)

    ax1 = Subplot(fig, 111, grid_helper=grid_helper)
    # ax1 will have a ticks and gridlines defined by the given
    # transform (+ transData of the Axes). Note that the transform of
    # the Axes itself (i.e., transData) is not affected by the given
    # transform.

    fig.add_subplot(ax1)

    ax1.imshow(np.arange(25).reshape(5, 5),
               vmax=50, cmap=plt.cm.gray_r,
               interpolation="nearest",
               origin="lower")

    # tick density
    grid_helper.grid_finder.grid_locator1._nbins = 6
    grid_helper.grid_finder.grid_locator2._nbins = 6


if 1:
    fig = plt.figure(1, figsize=(7, 4))
    fig.clf()

    curvelinear_test1(fig)
    plt.show()
license: mit
repo_name: AIML/scikit-learn | path: benchmarks/bench_plot_fastkmeans.py | copies: 294 | size: 4676
from __future__ import print_function

from collections import defaultdict
from time import time

import numpy as np
from numpy import random as nr

from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans


def compute_bench(samples_range, features_range):
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100

    max_it = len(samples_range) * len(features_range)

    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            data = nr.random_integers(-50, 50, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)

            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()

            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()

            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results


def compute_bench_2(chunks):
    results = defaultdict(lambda: [])
    n_features = 50000
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    X = np.empty((0, 2))
    for i in range(8):
        X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
    max_it = len(chunks)
    it = 0
    for chunk in chunks:
        it += 1
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()

        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)

        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()

        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results


if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    samples_range = np.linspace(50, 150, 5).astype(np.int)
    features_range = np.linspace(150, 50000, 5).astype(np.int)
    chunks = np.linspace(500, 10000, 15).astype(np.int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.iteritems())):
        if 'speed' in label:
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)

        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.iteritems())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
license: bsd-3-clause
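The benchmark imports KMeans and MiniBatchKMeans from the private module sklearn.cluster.k_means_; in current releases both are importable from sklearn.cluster directly. A minimal sketch of the comparison the script times, assuming a recent scikit-learn:

import numpy as np
from sklearn.cluster import KMeans, MiniBatchKMeans

rng = np.random.RandomState(0)
data = rng.randint(-50, 51, size=(1000, 20)).astype(float)

full = KMeans(n_clusters=10, init='k-means++', n_init=10).fit(data)
mini = MiniBatchKMeans(n_clusters=10, init='k-means++', batch_size=100).fit(data)

# MiniBatchKMeans trades a somewhat higher inertia for a much lower fit time.
print(full.inertia_, mini.inertia_)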
repo_name: pydata/xarray | path: asv_bench/benchmarks/dataarray_missing.py | copies: 4 | size: 1717
import pandas as pd

import xarray as xr

from . import randn, requires_dask

try:
    import dask  # noqa: F401
except ImportError:
    pass


def make_bench_data(shape, frac_nan, chunks):
    vals = randn(shape, frac_nan)
    coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])}
    da = xr.DataArray(vals, dims=("time", "x", "y"), coords=coords)

    if chunks is not None:
        da = da.chunk(chunks)

    return da


def time_interpolate_na(shape, chunks, method, limit):
    if chunks is not None:
        requires_dask()
    da = make_bench_data(shape, 0.1, chunks=chunks)
    actual = da.interpolate_na(dim="time", method="linear", limit=limit)

    if chunks is not None:
        actual = actual.compute()


time_interpolate_na.param_names = ["shape", "chunks", "method", "limit"]
time_interpolate_na.params = (
    [(3650, 200, 400), (100, 25, 25)],
    [None, {"x": 25, "y": 25}],
    ["linear", "spline", "quadratic", "cubic"],
    [None, 3],
)


def time_ffill(shape, chunks, limit):
    da = make_bench_data(shape, 0.1, chunks=chunks)
    actual = da.ffill(dim="time", limit=limit)

    if chunks is not None:
        actual = actual.compute()


time_ffill.param_names = ["shape", "chunks", "limit"]
time_ffill.params = (
    [(3650, 200, 400), (100, 25, 25)],
    [None, {"x": 25, "y": 25}],
    [None, 3],
)


def time_bfill(shape, chunks, limit):
    da = make_bench_data(shape, 0.1, chunks=chunks)
    actual = da.bfill(dim="time", limit=limit)

    if chunks is not None:
        actual = actual.compute()


time_bfill.param_names = ["shape", "chunks", "limit"]
time_bfill.params = (
    [(3650, 200, 400), (100, 25, 25)],
    [None, {"x": 25, "y": 25}],
    [None, 3],
)
license: apache-2.0
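The benchmark above exercises DataArray.interpolate_na, ffill and bfill. A minimal sketch of those calls on a small array with gaps; note that ffill/bfill may need the optional bottleneck dependency, depending on the xarray version installed:

import numpy as np
import pandas as pd
import xarray as xr

da = xr.DataArray(
    [0.0, np.nan, np.nan, 3.0, np.nan, 5.0],
    dims="time",
    coords={"time": pd.date_range("2000-01-01", periods=6, freq="D")},
)

# Fill at most one consecutive NaN by linear interpolation along time.
print(da.interpolate_na(dim="time", method="linear", limit=1).values)
# Forward-fill up to two consecutive gaps.
print(da.ffill(dim="time", limit=2).values)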
repo_name: MTgeophysics/mtpy | path: mtpy/imaging/plotnresponses.py | copies: 1 | size: 117024
# -*- coding: utf-8 -*- """ plots multiple MT responses simultaneously Created on Thu May 30 17:02:39 2013 @author: jpeacock-pr YG: the code there is massey, todo may need to rewrite it sometime """ # ============================================================================ import matplotlib.colorbar as mcb import matplotlib.colors as colors import matplotlib.gridspec as gridspec import matplotlib.patches as patches import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import MultipleLocator import mtpy.imaging.mtcolors as mtcl import mtpy.imaging.mtplottools as mtpl from mtpy.analysis.pt import PhaseTensor from mtpy.analysis.zinvariants import Zinvariants # reload(mtpl) # ============================================================================ from mtpy.core.mt import MT class PlotMultipleResponses(mtpl.PlotSettings): """ plots multiple MT responses simultaneously either in single plots or in one plot of sub-figures or in a single plot with subfigures for each component. expecting only one type of input --> can be: **fn_list** : list of filenames to plot **z_object_list** : list of mtpy.core.z.Z objects **res_object_list** : list of mtpy.imaging.mtplot.ResPhase objects **tipper_object_list** : list of mtpy.imaging.mtplot.Tipper objects **mt_object_list** : list of mtpy.imaging.mtplot.MTplot objects Arguments: ---------- **fn_list** : list of filenames to plot ie. [fn_1, fn_2, ...], *default* is None **z_object_list** : list of mtpy.core.z.Z objects *default* is None **res_object_list** : list of mtpy.imaging.mtplot.ResPhase objects *default* is None **tipper_object_list** : list of mtpy.imaging.mtplot.Tipper objects *default* is None **mt_object_list** : list of mtpy.imaging.mtplot.MTplot objects *default* is None **fig_num** : int figure number *default* is 1 **fig_size** : [width, height] of figure size in inches **rot_z** : float or np.ndarray rotation angle of impedance tensor (deg or radians), *Note* : rotaion is clockwise positive *default* is 0 Can input so each station is rotated at a constant angle or each period is rotated differently, or both. **plot_num** : [ 1 | 2 | 3 ] * 1 for just Ex/By and Ey/Bx *default* * 2 for all 4 components * 3 for off diagonal plus the determinant **plot_style** : [ '1' | 'all' | 'compare' ] determines the plotting style: * '1' for plotting each station in a different figure. *default* * 'all' for plotting each station in a subplot all in the same figure * 'compare' for comparing the responses all in one plot. Here the responses are colored from dark to light. This plot can get messy if too many stations are plotted. **plot_title** : string title of plot *default* is station name **plot_tipper** : [ 'yri' | 'yr' | 'yi' | 'n' ] Plots the tipper in a bottom pannel * 'yri' --> plots the real and imaginar parts * 'yr' --> plots just the real part * 'yi' --> plots just the imaginary part **Note:** the convention is to point towards a conductor. Can change this by setting the parameter arrow_direction = 1. **plot_strike** : [ 'y' | 1 | 2 | 3 | 'n' ] Plots the strike angle from different parameters: * 'y' --> plots strike angle determined from the invariants of Weaver et al. [2000] and the phase tensor of Caldwell et al. [2004], if Tipper is plotted the strike of the tipper is also plotted. * 1 --> plots strike angle determined from the invariants of Weaver et al. [2000] * 2 --> plots strike angle determined from the phase tensor of Caldwell et al. 
[2004] * 3 --> plots strike angle determined from the tipper * 'n' --> doesn't plot the strike, *default* **plot_skew** : [ 'y' | 'n' ] string for plotting skew angle. This is plotted in the same plot as strike angle at the moment. * 'y' for plotting the skew * 'n' for not plotting skew *default* **fig_dpi** : int dots-per-inch resolution, *default* is 300 :Example: :: >>> import mtpy.imaging.mtplottools as mtplot >>> import os >>> edipath = r"/home/Edifiles" >>> edilist = [os.path.join(edipath,edi) >>> ... for edi in os.listdir(edipath) >>> ... if edi.find('.edi')>0] >>> plot each station in a subplot all in one figure with tipper >>> rp1 = mtplot.PlotMultipleResPhase(fn_list=edilist, plotnum=1, >>> ... plot_tipper='yr', >>> ... plot_style='all') Attributes: ----------- -mt_list list of mtplot.MTplot objects made from inputs -fignum figure number for plotting -fig_size figure size in inches [width, height] -plotnum plot type, see arguments for details -title title of the plot, *default* is station name -dpi Dots-per-inch resolution of plot, *default* is 300 -rotz Rotate impedance tensor by this angle (deg) assuming that North is 0 and angle is positive clockwise -plot_tipper string to tell the program to plot tipper arrows or not, see accepted values above in arguments -plot_strike string or integer telling the program to plot the strike angle, see values above in arguments -plot_skew string to tell the program to plot skew angle. The skew is plotted in the same subplot as the strike angle at the moment -period period array cooresponding to the impedance tensor -font_size size of font for the axis ticklabels, note that the axis labels will be font_size+2 -axr matplotlib.axes object for the xy,yx resistivity plot. -axp matplotlib.axes object for the xy,yx phase plot -axt matplotlib.axes object for the tipper plot -ax2r matplotlib.axes object for the xx,yy resistivity plot -ax2p matplotlib.axes object for the xx,yy phase plot -axs matplotlib.axes object for the strike plot -axs2 matplotlib.axes object for the skew plot .. **Note:** that from these axes object you have control of the plot. You can do this by changing any parameter in the axes object and then calling update_plot() -erxyr class matplotlib.container.ErrorbarContainer for xy apparent resistivity. -erxyp class matplotlib.container.ErrorbarContainer for xy. -eryxr class matplotlib.container.ErrorbarContainer for yx apparent resistivity. -eryxp class matplotlib.container.ErrorbarContainer for yx phase. .. 
**Note:** that from these line objects you can manipulate the error bar properties and then call update_plot() -xy_ls line style for xy and xx components, *default* is None -yx_ls line style for yx and yy components, *default* is None -det_ls line style for determinant, *default* is None -xy_marker marker for xy and xx, *default* is squares -yx_marker marker for yx and yy, *default* is circles -det_marker marker for determinant, *default* is diamonds -xy_color marker color for xy and xx, *default* is blue -yx_color marker color for yx and yy, *default* is red -det_color marker color for determinant, *default* is green -xy_mfc marker face color for xy and xx, *default* is None -yx_mfc marker face color for yx and yy, *default* is None -det_mfc marker face color for determinant, *default* is None -skew_marker marker for skew angle, *default* is 'd' -skew_color color for skew angle, *default* is 'orange' -strike_inv_marker marker for strike angle determined by invariants *default* is '^' -strike_inv_color color for strike angle determined by invaraiants *default* is (.2, .2, .7) -strike_pt_marker marker for strike angle determined by pt, *default* is'v' -strike_pt_color color for strike angle determined by pt *default* is (.7, .2, .2) -strike_tip_marker marker for strike angle determined by tipper *default* is '>' -strike_tip_color color for strike angle determined by tipper *default* is (.2, .7, .2) -marker_size size of marker in relative dimenstions, *default* is 2 -marker_lw line width of marker, *default* is 100./dpi .. *For more on line and marker styles see matplotlib.lines.Line2D* -arrow_lw line width of the arrow, *default* is 0.75 -arrow_head_width head width of the arrow, *default* is 0 for no arrow head. Haven't found a good way to scale the arrow heads in a log scale. -arrow_head_height head width of the arrow, *default* is 0 for no arrow head. Haven't found a good way to scale the arrow heads in a log scale. -arrow_color_real color of the real arrows, *default* is black -arrow_color_imag color of the imaginary arrows, *default* is blue -arrow_direction 0 for pointing towards a conductor and -1 for pointing away from a conductor. -xlimits limits on the x-limits (period), *default* is None which will estimate the min and max from the data, setting the min as the floor(min(period)) and the max as ceil(max(period)). Input in linear scale if you want to change the period limits, ie. (.1,1000) -res_limits limits on the resistivity, *default* is None, which will estimate the min and max from the data, rounding to the lowest and highest increments to the power of 10 Input in linear scale if you want to change them, ie. (1,10000). Note this only sets the xy and yx components, not the xx and yy. -phase_limits limits on the phase, *default* is (0,90) but will adapt to the data if there is phase above 90 or below 0. Input in degrees. Note this only changes the xy and yx components. 
-tipper_limits limits of the y-axis, *default* is (-1,1) """ def __init__(self, **kwargs): """ Initialize parameters """ super(PlotMultipleResponses, self).__init__() fn_list = kwargs.pop('fn_list', None) z_object_list = kwargs.pop('z_object_list', None) tipper_object_list = kwargs.pop('tipper_object_list', None) mt_object_list = kwargs.pop('mt_object_list', None) # --> get the inputs into a list of mt objects self.mt_list = mtpl.get_mtlist(fn_list=fn_list, z_object_list=z_object_list, tipper_object_list=tipper_object_list, mt_object_list=mt_object_list) self.fig_num = kwargs.pop('fig_num', self.fig_num) # set some of the properties as attributes much to Lars' discontent self.plot_num = kwargs.pop('plot_num', 1) self.plot_style = kwargs.pop('plot_style', '1') self.plot_title = kwargs.pop('plot_title', None) # if rotation angle is an int or float make an array the length of # mt_list for plotting purposes self._rot_z = kwargs.pop('rot_z', 0) if isinstance(self._rot_z, float) or isinstance(self._rot_z, int): self._rot_z = np.array([self._rot_z] * len(self.mt_list)) # if the rotation angle is an array for rotation of different # freq than repeat that rotation array to the len(mt_list) elif isinstance(self._rot_z, np.ndarray): if self._rot_z.shape[0] != len(self.mt_list): self._rot_z = np.repeat(self._rot_z, len(self.mt_list)) else: pass self._set_rot_z(self._rot_z) # set plot limits self.xlimits = kwargs.pop('xlimits', None) self.res_limits = kwargs.pop('res_limits', None) self.phase_limits = kwargs.pop('phase_limits', None) self.tipper_limits = kwargs.pop('tipper_limits', None) self.strike_limits = kwargs.pop('strike_limits', None) self.skew_limits = kwargs.pop('skew_limits', (-9, 9)) self.pt_limits = kwargs.pop('pt_limits', None) # set font parameters self.font_size = kwargs.pop('font_size', 7) # set plot tipper or not self._plot_tipper = kwargs.pop('plot_tipper', 'n') # plot strike angle or not self._plot_strike = kwargs.pop('plot_strike', 'n') # plot skew angle self._plot_skew = kwargs.pop('plot_skew', 'n') # plot phase tensor ellipses self._plot_pt = kwargs.pop('plot_pt', 'n') # order of plots self.plot_order = kwargs.pop('plot_order', ['tip', 'pt', 'strike', 'skew']) self.plot_dict = dict([(kk, vv) for kk, vv in zip(['tip', 'pt', 'strike', 'skew'], [self._plot_tipper, self._plot_pt, self._plot_strike, self._plot_skew])]) # set arrow properties self.arrow_head_length = 0.03 self.arrow_head_width = 0.03 self.arrow_lw = .5 # ellipse_properties self.ellipse_size = 0.25 self.ellipse_spacing = kwargs.pop('ellipse_spacing', 1) if self.ellipse_size == 2 and self.ellipse_spacing == 1: self.ellipse_size = 0.25 # --> set text box parameters self.text_location = kwargs.pop('text_location', None) self.text_xpad = kwargs.pop('text_xpad', 1.35) self.text_ypad = kwargs.pop('text_ypad', .75) self.text_size = kwargs.pop('text_size', 7) self.text_weight = kwargs.pop('text_weight', 'bold') self.plot_yn = kwargs.pop('plot_yn', 'y') # plot on initializing if self.plot_yn == 'y': self.plot() # ---rotate data on setting rot_z def _set_rot_z(self, rot_z): """ need to rotate data when setting z """ # if rotation angle is an int or float make an array the length of # mt_list for plotting purposes if isinstance(rot_z, float) or isinstance(rot_z, int): self._rot_z += np.array([rot_z] * len(self.mt_list)) # if the rotation angle is an array for rotation of different # freq than repeat that rotation array to the len(mt_list) elif isinstance(rot_z, np.ndarray): if rot_z.shape[0] != len(self.mt_list): self._rot_z += 
np.repeat(rot_z, len(self.mt_list)) else: pass for ii, mt in enumerate(self.mt_list): mt.rot_z = self._rot_z[ii] def _get_rot_z(self): return self._rot_z rot_z = property(fget=_get_rot_z, fset=_set_rot_z, doc="""rotation angle(s)""") # --> on setting plot_ make sure to update the order and list def _set_plot_tipper(self, plot_tipper): """ If plotting tipper make arrow attributes """ self._plot_tipper = plot_tipper self.plot_dict['tip'] = self._plot_tipper def _get_plot_tipper(self): return self._plot_tipper plot_tipper = property(fget=_get_plot_tipper, fset=_set_plot_tipper, doc="""string to plot tipper""") def _set_plot_pt(self, plot_pt): """ If plotting tipper make arrow attributes """ self._plot_pt = plot_pt self.plot_dict['pt'] = self._plot_pt def _get_plot_pt(self): return self._plot_pt plot_pt = property(fget=_get_plot_pt, fset=_set_plot_pt, doc="""string to plot phase tensor ellipses""") def _set_plot_strike(self, plot_strike): """ change plot_dict when changing plot_strike """ self._plot_strike = plot_strike self.plot_dict['strike'] = self._plot_strike def _get_plot_strike(self): return self._plot_strike plot_strike = property(fget=_get_plot_strike, fset=_set_plot_strike, doc="""string to plot strike""") def _set_plot_skew(self, plot_skew): """ change plot_dict when changing plot_strike """ self._plot_skew = plot_skew self.plot_dict['skew'] = self._plot_skew def _get_plot_skew(self): return self._plot_skew plot_skew = property(fget=_get_plot_skew, fset=_set_plot_skew, doc="""string to plot skew""") # ---plot the resistivity and phase def plot(self, show=True): """ plot the apparent resistivity and phase """ # create a dictionary for the number of subplots needed pdict = {'res': 0, 'phase': 1} # start the index at 2 because resistivity and phase is permanent # for now index = 2 for key in self.plot_order: if self.plot_dict[key].find('y') == 0: pdict[key] = index index += 1 # get number of rows needed nrows = index # set height ratios of the subplots hr = [2, 1.5] + [1] * (len(list(pdict.keys())) - 2) # if self.plot_style == '1': # self.plotlist = [] # # #--> plot from edi's if given, don't need to rotate because # # data has already been rotated by the funcion _set_rot_z ## if self.fig_size is None: ## self.fig_size = [6, 6] # for ii, mt in enumerate(self.mt_list, 1): # p1 = plotresponse(mt_object=mt, # fig_num=ii, # fig_size=self.fig_size, # plot_num=self.plot_num, # fig_dpi=self.fig_dpi, # plot_yn='n', # plot_tipper=self._plot_tipper, # plot_strike=self._plot_strike, # plot_skew=self._plot_skew, # plot_pt=self._plot_pt) # # #make sure all the properties are set to match the users # #line style between points # p1.xy_ls = self.xy_ls # p1.yx_ls = self.yx_ls # p1.det_ls = self.det_ls # # #outline color # p1.xy_color = self.xy_color # p1.yx_color = self.yx_color # p1.det_color = self.det_color # # #face color # p1.xy_mfc = self.xy_mfc # p1.yx_mfc = self.yx_mfc # p1.det_mfc = self.det_mfc # # #maker # p1.xy_marker = self.xy_marker # p1.yx_marker = self.yx_marker # p1.det_marker = self.det_marker # # #size # p1.marker_size = 2 # # #set plot limits # p1.xlimits = self.xlimits # p1.res_limits = self.res_limits # p1.phase_limits = self.phase_limits # # #set font parameters # p1.font_size = self.font_size # # #set arrow properties # p1.arrow_lw = self.arrow_lw # p1.arrow_head_width = self.arrow_head_width # p1.arrow_head_length = self.arrow_head_length # p1.arrow_color_real = self.arrow_color_real # p1.arrow_color_imag = self.arrow_color_imag # p1.arrow_direction = self.arrow_direction # 
p1.tipper_limits = self.tipper_limits # # #skew properties # p1.skew_color = self.skew_color # p1.skew_marker = self.skew_marker # # #strike properties # p1.strike_inv_marker = self.strike_inv_marker # p1.strike_inv_color = self.strike_inv_color # # p1.strike_pt_marker = self.strike_pt_marker # p1.strike_pt_color = self.strike_pt_color # # p1.strike_tip_marker = self.strike_tip_marker # p1.strike_tip_color = self.strike_tip_color # # #--> plot the apparent resistivity and phase # self.plotlist.append(p1) # # p1.plot() # # -----Plot All in one figure with each plot as a subfigure------------ if self.plot_style == 'all': stlist = [] stlabel = [] st_maxlist = [] st_minlist = [] ns = len(self.mt_list) # set some parameters of the figure and subplot spacing plt.rcParams['font.size'] = self.font_size if self.plot_skew == 'y': plt.rcParams['figure.subplot.right'] = .94 else: plt.rcParams['figure.subplot.right'] = .98 plt.rcParams['figure.subplot.bottom'] = .1 plt.rcParams['figure.subplot.top'] = .93 # set the font properties for the axis labels fontdict = {'size': self.font_size + 2, 'weight': 'bold'} # set figure size according to what the plot will be. if self.fig_size is None: if self.plot_num == 1 or self.plot_num == 3: self.fig_size = [ns * 4, 6] elif self.plot_num == 2: self.fig_size = [ns * 8, 6] # make a figure instance self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi) # make subplots as columns for all stations that need to be plotted gs0 = gridspec.GridSpec(1, ns) # space out the subplots gs0.update(hspace=.025, wspace=.025, left=.085) labelcoords = (-0.145, 0.5) axr = None axt = None axpt = None axst = None axsk = None for ii, mt in enumerate(self.mt_list): # type: int, MT # get the reistivity and phase object rp = mt.Z # set x-axis limits from short period to long period if self.xlimits is None: self.xlimits = (10 ** (np.floor(np.log10(mt.period[0]))), 10 ** (np.ceil(np.log10((mt.period[-1]))))) # if self.phase_limits is None: # pass if self.res_limits is None: self.res_limits = (10 ** (np.floor( np.log10(min([mt.Z.res_xy.min(), mt.Z.res_yx.min()])))), 10 ** (np.ceil( np.log10(max([mt.Z.res_xy.max(), mt.Z.res_yx.max()]))))) # create a grid to place the figures into, set to have 2 rows # and 2 columns to put any of the 4 components. Make the phase # plot slightly shorter than the apparent resistivity plot and # have the two close to eachother vertically. 
gs = gridspec.GridSpecFromSubplotSpec(nrows, 2, subplot_spec=gs0[ii], height_ratios=hr, hspace=0.05, wspace=.0125) # --> create the axes instances for xy, yx if self.plot_num == 1 or self.plot_num == 3: # apparent resistivity axis axr = self.fig.add_subplot(gs[0, :], sharex=axr) # phase axis that shares period axis with resistivity axp = self.fig.add_subplot(gs[1, :], sharex=axr)# --> make figure for xy,yx components # space out the subplots # gs.update(hspace=.05, wspace=.02, left=.1) # --> make figure for all 4 components elif self.plot_num == 2: # --> create the axes instances # apparent resistivity axis axr = self.fig.add_subplot(gs[0, 0], sharex=axr) # phase axis that shares period axis with resistivity axp = self.fig.add_subplot(gs[1, 0], sharex=axr) # space out the subplots # gs.update(hspace=.05, wspace=.02, left=.07) # place y coordinate labels in the same location axr.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) axp.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) # --> plot tipper try: axt = self.fig.add_subplot(gs[pdict['tip'], :], sharey=axt) axt.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) except KeyError: pass # --> plot phase tensors try: # can't share axis because not on the same scale axpt = self.fig.add_subplot(gs[pdict['pt'], :], aspect='equal', sharey=axpt) axpt.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) except KeyError: pass # --> plot strike try: axst = self.fig.add_subplot(gs[pdict['strike'], :], sharex=axr, sharey=axst) axst.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) except KeyError: pass # --> plot skew try: axsk = self.fig.add_subplot(gs[pdict['skew'], :], sharex=axr, sharey=axsk) axsk.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) except KeyError: pass # ---------plot the apparent resistivity---------------------- # --> plot as error bars and just as points xy-blue, yx-red # res_xy ebxyr = axr.errorbar(mt.period, mt.Z.res_xy, marker=self.xy_marker, ms=self.marker_size, mfc=self.xy_mfc, mec=self.xy_color, mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.res_err_xy, ecolor=self.xy_color, capsize=self.marker_size, elinewidth=self.marker_lw) # res_yx ebyxr = axr.errorbar(mt.period, mt.Z.res_yx, marker=self.yx_marker, ms=self.marker_size, mfc=self.yx_mfc, mec=self.yx_color, mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.res_err_yx, ecolor=self.yx_color, capsize=self.marker_size, elinewidth=self.marker_lw) # --> set axes properties plt.setp(axr.get_xticklabels(), visible=False) axr.set_yscale('log', nonposy='clip') axr.set_xscale('log', nonposx='clip') axr.set_xlim(self.x_limits) axr.set_ylim(self.res_limits) axr.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) if ii == 0: axr.set_ylabel('App. Res. 
($\mathbf{\Omega \cdot m}$)', fontdict=fontdict) axr.legend((ebxyr[0], ebyxr[0]), ('$Z_{xy}$', '$Z_{yx}$'), loc=3, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) else: plt.setp(axr.get_yticklabels(), visible=False) # -----Plot the phase---------------------------------------- # phase_xy ebxyp = axp.errorbar(mt.period, mt.Z.phase_xy, marker=self.xy_marker, ms=self.marker_size, mfc=self.xy_mfc, mec=self.xy_color, mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.phase_err_xy, ecolor=self.xy_color, capsize=self.marker_size, elinewidth=self.marker_lw) # phase_yx: ebyxp = axp.errorbar(mt.period, mt.Z.phase_yx + 180, marker=self.yx_marker, ms=self.marker_size, mfc=self.yx_mfc, mec=self.yx_color, mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.phase_err_yx, ecolor=self.yx_color, capsize=self.marker_size, elinewidth=self.marker_lw) # check the phase to see if any point are outside of [0:90] if self.phase_limits is None: pymin = min(0, min([min(rp.phase_xy), min(rp.phase_yx)])) pymax = max(89.9, max([max(rp.phase_xy), max(rp.phase_yx)])) self.phase_limits = (pymin, pymax) # self.phase_limits = (pymin, pymax) # else: # self.phase_limits = (min(self.phase_limits[0], pymin), # max(self.phase_limits[1], pymax)) # if self.phase_limits is None: # if min(rp.phasexy) < 0 or min(rp.phase_yx) < 0: # pymin = min([min(rp.phase_xy), # min(rp.phase_yx)]) # if pymin > 0: # pymin = 0 # else: # pymin = 0 # # if max(rp.phasexy) > 90 or max(rp.phase_yx) > 90: # pymax = min([max(rp.phase_xy), # YG: should use max instead ?? # max(rp.phase_yx)]) # if pymax < 91: # pymax = 89.9 # YG: why?? # else: # pymax = 89.9 # # self.phase_limits = (pymin, pymax) # --> set axes properties if ii == 0: axp.set_ylabel('Phase (deg)', fontdict) else: plt.setp(axp.get_yticklabels(), visible=False) if self.plot_tipper == 'n' and self.plot_skew == 'n' and \ self.plot_strike == 'n': axp.set_xlabel('Period (s)', fontdict) axp.set_xscale('log', nonposx='clip') if self.phase_limits is None: self.phase_limits = (-179.9,179.9) axp.set_ylim(self.phase_limits) axp.set_xlim(self.x_limits) axp.yaxis.set_major_locator(MultipleLocator(15)) axp.yaxis.set_minor_locator(MultipleLocator(5)) axp.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) tklabels = [mtpl.labeldict[tt] for tt in np.arange(np.log10(self.xlimits[0]), np.log10(self.xlimits[1]) + 1)] tklabels[0] = '' tklabels[-1] = '' axp.set_xticklabels(tklabels, fontdict={'size': self.font_size}) if len(list(pdict.keys())) > 2: plt.setp(axp.xaxis.get_ticklabels(), visible=False) plt.setp(axp.xaxis.get_label(), visible=False) # -----plot tipper-------------------------------------------- if self._plot_tipper.find('y') == 0: plt.setp(axp.xaxis.get_ticklabels(), visible=False) tp = mt.Tipper txr = tp.mag_real * np.sin(tp.angle_real * np.pi / 180 + \ np.pi * self.arrow_direction) tyr = tp.mag_real * np.cos(tp.angle_real * np.pi / 180 + \ np.pi * self.arrow_direction) txi = tp.mag_imag * np.sin(tp.angle_imag * np.pi / 180 + \ np.pi * self.arrow_direction) tyi = tp.mag_imag * np.cos(tp.angle_imag * np.pi / 180 + \ np.pi * self.arrow_direction) nt = len(txr) tiplist = [] tiplabel = [] for aa in range(nt): xlenr = txr[aa] * mt.period[aa] xleni = txi[aa] * mt.period[aa] # --> plot real arrows if self._plot_tipper.find('r') > 0: axt.arrow(np.log10(mt.period[aa]), 0, xlenr, tyr[aa], lw=self.arrow_lw, facecolor=self.arrow_color_real, edgecolor=self.arrow_color_real, head_width=self.arrow_head_width, head_length=self.arrow_head_length, length_includes_head=False) if aa 
== 0: line1 = axt.plot(0, 0, self.arrow_color_real) tiplist.append(line1[0]) tiplabel.append('real') # --> plot imaginary arrows if self.plot_tipper.find('i') > 0: axt.arrow(np.log10(mt.period[aa]), 0, xleni, tyi[aa], facecolor=self.arrow_color_imag, edgecolor=self.arrow_color_imag, lw=self.arrow_lw, head_width=self.arrow_head_width, head_length=self.arrow_head_length, length_includes_head=False) if aa == 0: line2 = axt.plot(0, 0, self.arrow_color_imag) tiplist.append(line2[0]) tiplabel.append('imag') # make a line at 0 for reference axt.plot(mt.period, [0] * nt, 'k', lw=.5) if ii == 0: axt.legend(tiplist, tiplabel, loc='upper left', markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.1, prop={'size': self.font_size}) axt.set_ylabel('Tipper', fontdict=fontdict) else: plt.setp(axt.get_yticklabels(), visible=False) # set axis properties axt.yaxis.set_major_locator(MultipleLocator(.2)) axt.yaxis.set_minor_locator(MultipleLocator(.1)) axt.set_xlabel('Period (s)', fontdict=fontdict) axt.set_xscale('log', nonposx='clip') if self.tipper_limits is None: tmax = max([tyr.max(), tyi.max()]) if tmax > 1: tmax = .899 tmin = min([tyr.min(), tyi.min()]) if tmin < -1: tmin = -.899 self.tipper_limits = (tmin - .1, tmax + .1) axt.set_ylim(self.tipper_limits) axt.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) tklabels = [] xticks = [] for tk in axt.get_xticks(): try: tklabels.append(mtpl.labeldict[tk]) xticks.append(tk) except KeyError: pass axt.set_xticks(xticks) axt.set_xticklabels(tklabels, fontdict={'size': self.font_size}) if pdict['tip'] != nrows - 1: plt.setp(axt.get_yticklabels(), visible=False) # need to reset the xlimits caouse they get reset when calling # set_ticks for some reason axt.set_xlim(np.log10(self.xlimits[0]), np.log10(self.xlimits[1])) # ------plot strike angles---------------------------------------------- if self._plot_strike.find('y') == 0: if self._plot_strike.find('i') > 0: # strike from invariants zinv = Zinvariants(mt.Z) s1 = zinv.strike # fold angles so go from -90 to 90 s1[np.where(s1 > 90)] -= -180 s1[np.where(s1 < -90)] += 180 # plot strike with error bars ps1 = axst.errorbar(mt.period, s1, marker=self.strike_inv_marker, ms=self.marker_size, mfc=self.strike_inv_color, mec=self.strike_inv_color, mew=self.marker_lw, ls='none', yerr=zinv.strike_err, ecolor=self.strike_inv_color, capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps1[0]) stlabel.append('Z_inv') st_maxlist.append(s1.max()) st_minlist.append(s1.min()) if self._plot_strike.find('p') > 0: # strike from phase tensor pt = mt.pt # type: PhaseTensor s2, s2_err = pt.azimuth, pt.azimuth_err # fold angles to go from -90 to 90 s2[np.where(s2 > 90)] -= 180 s2[np.where(s2 < -90)] += 180 # plot strike with error bars ps2 = axst.errorbar(mt.period, s2, marker=self.strike_pt_marker, ms=self.marker_size, mfc=self.strike_pt_color, mec=self.strike_pt_color, mew=self.marker_lw, ls='none', yerr=s2_err, ecolor=self.strike_pt_color, capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps2[0]) stlabel.append('PT') st_maxlist.append(s2.max()) st_minlist.append(s2.min()) if self._plot_strike.find('t') > 0: # strike from tipper tp = mt.Tipper s3 = tp.angle_real + 90 # fold to go from -90 to 90 s3[np.where(s3 > 90)] -= 180 s3[np.where(s3 < -90)] += 180 # plot strike with error bars ps3 = axst.errorbar(mt.period, s3, marker=self.strike_tip_marker, ms=self.marker_size, mfc=self.strike_tip_color, mec=self.strike_tip_color, mew=self.marker_lw, ls='none', 
yerr=np.zeros_like(s3), ecolor=self.strike_tip_color, capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps3[0]) stlabel.append('Tip') st_maxlist.append(s3.max()) st_minlist.append(s3.min()) # --> set axes properties if self.strike_limits is None: stmin = min(st_minlist) if stmin - 3 < -90: stmin -= 3 else: stmin = -89.99 stmax = max(st_maxlist) if stmin + 3 < 90: stmin += 3 else: stmin = 89.99 self.strike_limits = (-max([abs(stmin), abs(stmax)]), max([abs(stmin), abs(stmax)])) axst.plot(axr.get_xlim(), [0, 0], color='k', lw=.5) if ii == 0: axst.set_ylabel('Strike', fontdict=fontdict) else: plt.setp(axst.get_yticklabels(), visible=False) axst.set_xlabel('Period (s)', fontdict=fontdict) axst.set_ylim(self.strike_limits) axst.yaxis.set_major_locator(MultipleLocator(30)) axst.yaxis.set_minor_locator(MultipleLocator(5)) axst.set_xscale('log', nonposx='clip') axst.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) if ii == 0: try: axst.legend(stlist, stlabel, loc=3, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02, prop={'size': self.font_size - 1}) except: pass # set th xaxis tick labels to invisible if pdict['strike'] != nrows - 1: plt.setp(axst.xaxis.get_ticklabels(), visible=False) # ------plot skew angle--------------------------------------------- if self._plot_skew == 'y': # strike from phase tensor pt = mt.pt sk, sk_err = pt.beta, pt.beta_err ps4 = axsk.errorbar(mt.period, sk, marker=self.skew_marker, ms=self.marker_size, mfc=self.skew_color, mec=self.skew_color, mew=self.marker_lw, ls='none', yerr=sk_err, ecolor=self.skew_color, capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps4[0]) stlabel.append('Skew') if self.skew_limits is None: self.skew_limits = (-9, 9) axsk.set_ylim(self.skew_limits) axsk.yaxis.set_major_locator(MultipleLocator(3)) axsk.yaxis.set_minor_locator(MultipleLocator(1)) if ii ==0: axsk.set_ylabel('Skew', fontdict) else: plt.setp(axsk.get_yticklabels(), visible=False) axsk.set_xlabel('Period (s)', fontdict) axsk.set_xscale('log', nonposx='clip') # set th xaxis tick labels to invisible if pdict['skew'] != nrows - 1: plt.setp(axsk.xaxis.get_ticklabels(), visible=False) # ----plot phase tensor ellipse--------------------------------------- if self._plot_pt == 'y': # get phase tensor instance pt = mt.pt cmap = self.ellipse_cmap ckmin = self.ellipse_range[0] ckmax = self.ellipse_range[1] try: ckstep = float(self.ellipse_range[2]) except IndexError: ckstep = 3 if cmap == 'mt_seg_bl2wh2rd': bounds = np.arange(ckmin, ckmax + ckstep, ckstep) nseg = float((ckmax - ckmin) / (2 * ckstep)) # get the properties to color the ellipses by if self.ellipse_colorby == 'phiminang' or \ self.ellipse_colorby == 'phimin': colorarray = pt.phimin elif self.ellipse_colorby == 'phidet': colorarray = np.sqrt(abs(pt.det)) * (180 / np.pi) elif self.ellipse_colorby == 'skew' or \ self.ellipse_colorby == 'skew_seg': colorarray = pt.beta elif self.ellipse_colorby == 'ellipticity': colorarray = pt.ellipticity else: raise NameError(self.ellipse_colorby + ' is not supported') # -------------plot ellipses----------------------------------- for kk, ff in enumerate(mt.period): # make sure the ellipses will be visable eheight = pt.phimin[kk] / pt.phimax[kk] * \ self.ellipse_size ewidth = pt.phimax[kk] / pt.phimax[kk] * \ self.ellipse_size # create an ellipse scaled by phimin and phimax and # oriented along the azimuth which is calculated as # clockwise but needs to be plotted counter-clockwise # hence the negative sign. 
ellipd = patches.Ellipse((np.log10(ff) * \ self.ellipse_spacing, 0), width=ewidth, height=eheight, angle=90 - pt.azimuth[kk]) axpt.add_patch(ellipd) # get ellipse color if cmap.find('seg') > 0: ellipd.set_facecolor(mtcl.get_plot_color( colorarray[kk], self.ellipse_colorby, cmap, ckmin, ckmax, bounds=bounds)) else: ellipd.set_facecolor(mtcl.get_plot_color( colorarray[kk], self.ellipse_colorby, cmap, ckmin, ckmax)) # ----set axes properties----------------------------------------------- # --> set tick labels and limits axpt.set_xlim(np.floor(np.log10(self.xlimits[0])), np.ceil(np.log10(self.xlimits[1]))) tklabels = [] xticks = [] for tk in axpt.get_xticks(): try: tklabels.append(mtpl.labeldict[tk]) xticks.append(tk) except KeyError: pass axpt.set_xticks(xticks) axpt.set_xticklabels(tklabels, fontdict={'size': self.font_size}) axpt.set_xlabel('Period (s)', fontdict=fontdict) axpt.set_ylim(ymin=-1.5 * self.ellipse_size, ymax=1.5 * self.ellipse_size) axpt.grid(True, alpha=.25, which='major', color=(.25, .25, .25), lw=.25) plt.setp(axpt.get_yticklabels(), visible=False) if pdict['pt'] != nrows - 1: plt.setp(axpt.get_xticklabels(), visible=False) # add colorbar for PT only for first plot if ii == 0: axpos = axpt.get_position() cb_position = (axpos.bounds[0] - .0575, axpos.bounds[1] + .02, .01, axpos.bounds[3] * .75) cbax = self.fig.add_axes(cb_position) if cmap == 'mt_seg_bl2wh2rd': # make a color list clist = [(cc, cc, 1) for cc in np.arange(0, 1 + 1. / (nseg), 1. / (nseg))] + \ [(1, cc, cc) for cc in np.arange(1, -1. / (nseg), -1. / (nseg))] # make segmented colormap mt_seg_bl2wh2rd = colors.ListedColormap(clist) # make bounds so that the middle is white bounds = np.arange(ckmin - ckstep, ckmax + 2 * ckstep, ckstep) # normalize the colors norms = colors.BoundaryNorm(bounds, mt_seg_bl2wh2rd.N) # make the colorbar cbpt = mcb.ColorbarBase(cbax, cmap=mt_seg_bl2wh2rd, norm=norms, orientation='vertical', ticks=bounds[1:-1]) else: cbpt = mcb.ColorbarBase(cbax, cmap=mtcl.cmapdict[cmap], norm=colors.Normalize(vmin=ckmin, vmax=ckmax), orientation='vertical') cbpt.set_ticks([ckmin, (ckmax - ckmin) / 2, ckmax]) cbpt.set_ticklabels(['{0:.0f}'.format(ckmin), '{0:.0f}'.format((ckmax - ckmin) / 2), '{0:.0f}'.format(ckmax)]) cbpt.ax.yaxis.set_label_position('left') cbpt.ax.yaxis.set_label_coords(-1.05, .5) cbpt.ax.yaxis.tick_right() cbpt.ax.tick_params(axis='y', direction='in') cbpt.set_label(mtpl.ckdict[self.ellipse_colorby], fontdict={'size': self.font_size}) # == == Plot the Z_xx, Z_yy components if desired == if self.plot_num == 2: # ---------plot the apparent resistivity---------------- axr2 = self.fig.add_subplot(gs[0, 1], sharex=axr, sharey=axr) axr2.yaxis.set_label_coords(-.1, 0.5) # res_xx ebxxr = axr2.errorbar(mt.period, mt.Z.res_xx, marker=self.xy_marker, ms=self.marker_size, mfc=self.xy_mfc, mec=self.xy_color, mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.res_err_xx, ecolor=self.xy_color, capsize=self.marker_size, elinewidth=self.marker_lw) # res_yy ebyyr = axr2.errorbar(mt.period, mt.Z.res_yy, marker=self.yx_marker, ms=self.marker_size, mfc=self.yx_mfc, mec=self.yx_color, mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.res_err_yy, ecolor=self.yx_color, capsize=self.marker_size, elinewidth=self.marker_lw) # --> set axes properties plt.setp(axr2.get_xticklabels(), visible=False) plt.setp(axr2.get_yticklabels(), visible=False) axr2.set_yscale('log', nonposy='clip') axr2.set_xscale('log', nonposx='clip') axr2.set_xlim(self.x_limits) axr2.set_ylim(self.res_limits) axr2.grid(True, alpha=.25, which='both', 
color=(.25, .25, .25), lw=.25) if ii == 0: axr2.legend((ebxxr[0], ebyyr[0]), ('$Z_{xx}$', '$Z_{yy}$'), loc=3, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) # -----Plot the phase----------------------------------- axp2 = self.fig.add_subplot(gs[1, 1], sharex=axr, sharey=axp) axp2.yaxis.set_label_coords(-.1, 0.5) # phase_xx ebxxp = axp2.errorbar(mt.period, mt.Z.phase_xx, marker=self.xy_marker, ms=self.marker_size, mfc=self.xy_mfc, mec=self.xy_color, mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.phase_err_xx, ecolor=self.xy_color, capsize=self.marker_size, elinewidth=self.marker_lw) # phase_yy ebyyp = axp2.errorbar(mt.period, mt.Z.phase_yy, marker=self.yx_marker, ms=self.marker_size, mfc=self.yx_mfc, mec=self.yx_color, mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.phase_err_yy, ecolor=self.yx_color, capsize=self.marker_size, elinewidth=self.marker_lw) # --> set axes properties plt.setp(axp2.get_xticklabels(), visible=False) plt.setp(axp2.get_yticklabels(), visible=False) axp2.set_xlabel('Period (s)', fontdict) axp2.set_xscale('log', nonposx='clip') if self.phase_limits is None: self.phase_limits=(-179.9,179.9) axp2.set_ylim(self.phase_limits) axp2.set_xlim(self.x_limits) axp2.yaxis.set_major_locator(MultipleLocator(30)) axp2.yaxis.set_minor_locator(MultipleLocator(5)) # axp2.set_xticklabels(tklabels, # fontdict={'size': self.font_size}) axp2.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) if len(list(pdict.keys())) > 2: plt.setp(axp2.xaxis.get_ticklabels(), visible=False) plt.setp(axp2.xaxis.get_label(), visible=False) # == =Plot the Determinant if desired == == == == if self.plot_num == 3: # res_det ebdetr = axr.errorbar(mt.period, rp.res_det, marker=self.det_marker, ms=self.marker_size, mfc=self.det_mfc, mec=self.det_color, mew=self.marker_lw, ls=self.det_ls, yerr=rp.res_det_err, ecolor=self.det_color, capsize=self.marker_size, elinewidth=self.marker_lw) # phase_det ebdetp = axp.errorbar(mt.period, rp.phase_det, marker=self.det_marker, ms=self.marker_size, mfc=self.det_mfc, mec=self.det_color, mew=self.marker_lw, ls=self.det_ls, yerr=rp.phase_det_err, ecolor=self.det_color, capsize=self.marker_size, elinewidth=self.marker_lw) # --> set axes properties plt.setp(axr.get_xticklabels(), visible=False) if ii == 0: axr.set_ylabel('App. Res. 
($\mathbf{\Omega \cdot m}$)', fontdict=fontdict) else: plt.setp(axr.get_yticklabels(), visible=False) axr.set_yscale('log', nonposy='clip') axr.set_xscale('log', nonposx='clip') axr.set_ylim(self.res_limits) axr.set_xlim(self.xlimits) axr.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # --> set axes properties axp.set_xlabel('Period (s)', fontdict) if ii == 0: axp.set_ylabel('Phase (deg)', fontdict) else: plt.setp(axp.get_yticklabels(), visible=False) axp.set_xscale('log', nonposx='clip') axp.set_ylim(self.phase_limits) axp.yaxis.set_major_locator(MultipleLocator(15)) axp.yaxis.set_minor_locator(MultipleLocator(5)) tklabels = [mtpl.labeldict[tt] for tt in np.arange(np.log10(self.xlimits[0]), np.log10(self.xlimits[1]) + 1)] tklabels[0] = '' tklabels[-1] = '' axp.set_xticklabels(tklabels, fontdict={'size': self.font_size}) axp.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # make title and show axr.set_title(mt.station, fontsize=self.font_size, fontweight='bold') if show: plt.show() # ===Plot all responses into one plot to compare changes == if self.plot_style == 'compare': ns = len(self.mt_list) # make color lists for the plots going light to dark cxy = [(0, 0 + float(cc) / ns, 1 - float(cc) / ns) for cc in range(ns)] cyx = [(1, float(cc) / ns, 0) for cc in range(ns)] cdet = [(0, 1 - float(cc) / ns, 0) for cc in range(ns)] ctipr = [(.75 * cc / ns, .75 * cc / ns, .75 * cc / ns) for cc in range(ns)] ctipi = [(float(cc) / ns, 1 - float(cc) / ns, .25) for cc in range(ns)] cst = [(.5 * cc / ns, 0, .5 * cc / ns) for cc in range(ns)] # make marker lists for the different components mxy = ['s', 'D', 'x', '+', '*', '1', '3', '4'] * 5 myx = ['o', 'h', '8', 'p', 'H', 7, 4, 6] * 5 legendlistxy = [] legendlistyx = [] stationlist = [] tiplist = [] stlist = [] sklist = [] # set some parameters of the figure and subplot spacing plt.rcParams['font.size'] = self.font_size plt.rcParams['figure.subplot.bottom'] = .1 plt.rcParams['figure.subplot.top'] = .97 plt.rcParams['figure.subplot.left'] = .08 plt.rcParams['figure.subplot.right'] = .98 # set the font properties for the axis labels fontdict = {'size': self.font_size + 1, 'weight': 'bold'} # set figure size according to what the plot will be. if self.fig_size is None: if self.plot_num == 1 or self.plot_num == 3: self.fig_size = [8, 6] pass elif self.plot_num == 2: self.fig_size = [8, 6] nrows += 1 # make a figure instance self.fig = plt.figure(self.fig_num, self.fig_size, dpi=self.fig_dpi) # make a grid as usual, but put xy and yx in different plots # otherwise the plot is too busy to see what's going on. 
hr = [2, 1.5] + [1] * (nrows - 2) gs = gridspec.GridSpec(nrows, 2, height_ratios=hr, hspace=.05) # --> make figure for xy,yx components if self.plot_num == 1 or self.plot_num == 3: # set label coordinates labelcoords = (-0.125, 0.5) # space out the subplots gs.update(hspace=.05, wspace=.02, left=.1) # --> make figure for all 4 components elif self.plot_num == 2: # set label coordinates labelcoords = (-0.125, 0.5) # space out the subplots gs.update(hspace=.05, wspace=.02, left=.07) for key in pdict: if key != 'res' and key != 'phase': pdict[key] += 1 # --> create the axes instances # apparent resistivity axis self.axrxy = self.fig.add_subplot(gs[0, 0]) self.axryx = self.fig.add_subplot(gs[0, 1], sharex=self.axrxy, sharey=self.axrxy) # phase axis that shares period axis with resistivity self.axpxy = self.fig.add_subplot(gs[1, 0], sharex=self.axrxy) self.axpyx = self.fig.add_subplot(gs[1, 1], sharex=self.axrxy, sharey=self.axpxy) # place y coordinate labels in the same location # self.axrxy.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) # self.axpxy.yaxis.set_label_coords(labelcoords[0], labelcoords[1]) # --> plot tipper try: self.axt = self.fig.add_subplot(gs[pdict['tip'], :]) # self.axt.yaxis.set_label_coords(labelcoords[0] * .5, # labelcoords[1]) except KeyError: pass # --> plot phase tensors try: # can't share axis because not on the same scale self.axpt = self.fig.add_subplot(gs[pdict['pt'], :], aspect='equal') # self.axpt.yaxis.set_label_coords(labelcoords[0] * .5, # labelcoords[1]) except KeyError: pass # --> plot strike try: self.axst = self.fig.add_subplot(gs[pdict['strike'], :], sharex=self.axrxy) # self.axst.yaxis.set_label_coords(labelcoords[0] * .5, # labelcoords[1]) except KeyError: pass # --> plot skew try: self.axsk = self.fig.add_subplot(gs[pdict['skew'], :], sharex=self.axrxy) # self.axsk.yaxis.set_label_coords(labelcoords[0] * .5, # labelcoords[1]) except KeyError: pass for ii, mt in enumerate(self.mt_list): # get the reistivity and phase object # set x-axis limits from short period to long period if self.xlimits is None: self.xlimits = (10 ** (np.floor(np.log10(mt.period.min()))), 10 ** (np.ceil(np.log10(mt.period.max())))) else: self.xlimits = (10 ** min([np.floor(np.log10(self.xlimits[0])), np.floor(np.log10(mt.period.min()))]), 10 ** max([np.ceil(np.log10(self.xlimits[1])), np.ceil(np.log10(mt.period.max()))])) if self.phase_limits is None: self.phase_limits = (0, 89.9) stationlist.append(mt.station) # == == == == =Plot Z_xy and Z_yx == # if self.plot_num == 1 or self.plot_num == 2: # ---------plot the apparent resistivity-------------------- # --> plot as error bars and just as points xy-blue, yx-red # res_xy ebxyr = self.axrxy.errorbar(mt.period, mt.Z.res_xy, color=cxy[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cxy[ii], mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.res_err_xy, ecolor=cxy[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # res_yx ebyxr = self.axryx.errorbar(mt.period, mt.Z.res_yx, color=cyx[ii], marker=myx[ii % len(myx)], ms=self.marker_size, mfc='None', mec=cyx[ii], mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.res_err_yx, ecolor=cyx[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # -----Plot the phase--------------------------------------- # phase_xy self.axpxy.errorbar(mt.period, mt.Z.phase_xy, color=cxy[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cxy[ii], mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.phase_err_xy, ecolor=cxy[ii], capsize=self.marker_size, 
elinewidth=self.marker_lw) # phase_yx: Note add 180 to place it in same quadrant as # phase_xy self.axpyx.errorbar(mt.period, mt.Z.phase_yx + 180, color=cyx[ii], marker=myx[ii % len(myx)], ms=self.marker_size, mfc='None', mec=cyx[ii], mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.phase_err_yx, ecolor=cyx[ii], capsize=self.marker_size, elinewidth=self.marker_lw) legendlistxy.append(ebxyr) legendlistyx.append(ebyxr) # ==== Plot the Z_xx, Z_yy components if desired == if self.plot_num == 2: # ---------plot the apparent resistivity---------------- self.axr2xx = self.fig.add_subplot(gs[2, 0], sharex=self.axrxy) self.axr2xx.yaxis.set_label_coords(-.095, 0.5) self.axr2yy = self.fig.add_subplot(gs[2, 1], sharex=self.axrxy) # res_xx ebxxr = self.axr2xx.errorbar(mt.period, mt.Z.res_xx, color=cxy[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cxy[ii], mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.res_err_xx, ecolor=cxy[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # res_yy ebyyr = self.axr2yy.errorbar(mt.period, mt.Z.res_yy, color=cyx[ii], marker=myx[ii % len(myx)], ms=self.marker_size, mfc='None', mec=cyx[ii], mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.res_err_yy, ecolor=cyx[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # -----Plot the phase----------------------------------- self.axp2xx = self.fig.add_subplot(gs[2, 0], sharex=self.axrxy) self.axp2xx.yaxis.set_label_coords(-.095, 0.5) self.axp2yy = self.fig.add_subplot(gs[2, 1], sharex=self.axrxy) # phase_xx ebxxp = self.axp2xx.errorbar(mt.period, mt.Z.phase_xx, color=cxy[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cxy[ii], mew=self.marker_lw, ls=self.xy_ls, yerr=mt.Z.phase_err_xx, ecolor=cxy[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # phase_yy ebyyp = self.axp2yy.errorbar(mt.period, mt.Z.phase_yy, color=cyx[ii], marker=myx[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cyx[ii], mew=self.marker_lw, ls=self.yx_ls, yerr=mt.Z.phase_err_yy, ecolor=cyx[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # ===Plot the Determinant if desired == if self.plot_num == 3: # res_det ebdetr = self.axrxy.errorbar(mt.period, mt.Z.res_det, color=cxy[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cdet[ii], mew=self.marker_lw, ls=self.det_ls, yerr=mt.Z.res_det_err, ecolor=cdet[ii], capsize=self.marker_size, elinewidth=self.marker_lw) # phase_det ebdetp = self.axpxy.errorbar(mt.period, mt.Z.phase_det, color=cyx[ii], marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc='None', mec=cdet[ii], mew=self.marker_lw, ls=self.det_ls, yerr=mt.Z.phase_det_err, ecolor=cdet[ii], capsize=self.marker_size, elinewidth=self.marker_lw) legendlistxy.append(ebdetr) # -----plot tipper---------------------------------------------- if self._plot_tipper.find('y') == 0: txr = mt.Tipper.mag_real * np.sin(mt.Tipper.angle_real * np.pi / 180 + \ np.pi * self.arrow_direction) tyr = mt.Tipper.mag_real * np.cos(mt.Tipper.angle_real * np.pi / 180 + \ np.pi * self.arrow_direction) txi = mt.Tipper.mag_imag * np.sin(mt.Tipper.angle_imag * np.pi / 180 + \ np.pi * self.arrow_direction) tyi = mt.Tipper.mag_imag * np.cos(mt.Tipper.angle_imag * np.pi / 180 + \ np.pi * self.arrow_direction) nt = len(txr) for aa in range(nt): xlenr = txr[aa] * np.log10(mt.period[aa]) xleni = txi[aa] * np.log10(mt.period[aa]) if self.tipper_limits is None: tmax = max([tyr.max(), tyi.max()]) tmin = min([tyr.min(), tyi.min()]) if np.isnan(tmax): tmax = 1.0 if np.isnan(tmin): tmin = -1.0 tmin = max([-1, tmin]) 
tmax = min([1, tmax]) self.tipper_limits = (tmin - .1, tmax + .1) else: tmax = max([tyr.max(), tyi.max(), self.tipper_limits[1] - .1]) + .1 tmin = min([tyr.min(), tyi.min(), self.tipper_limits[0] + .1]) - .1 if np.isnan(tmax): tmax = 1.0 if np.isnan(tmin): tmin = -1.0 tmin = max([-1, tmin]) tmax = min([1, tmax]) self.tipper_limits = (tmin, tmax) # --> plot real arrows if self._plot_tipper.find('r') > 0: self.axt.arrow(np.log10(mt.period[aa]), 0, xlenr, tyr[aa], lw=self.arrow_lw, facecolor=ctipr[ii], edgecolor=ctipr[ii], head_width=self.arrow_head_width, head_length=self.arrow_head_length, length_includes_head=False) # --> plot imaginary arrows if self._plot_tipper.find('i') > 0: self.axt.arrow(np.log10(mt.period[aa]), 0, xleni, tyi[aa], lw=self.arrow_lw, head_width=self.arrow_head_width, head_length=self.arrow_head_length, length_includes_head=False) lt = self.axt.plot(0, 0, lw=1, color=ctipr[ii]) tiplist.append(lt[0]) # ------plot strike angles---------------------------------------------- if self._plot_strike.find('y') == 0: # if self._plot_strike.find('i') > 0: # #strike from invariants # zinv = mt.Z.invariants # s1 = zinv.strike # # #fold angles so go from -90 to 90 # s1[np.where(s1>90)] -= -180 # s1[np.where(s1<-90)] += 180 # # #plot strike with error bars # ps1 = self.axst.errorbar(mt.period, # s1, # marker=mxy[ii % len(mxy)], # ms=self.marker_size, # mfc=cst[ii], # mec=cst[ii], # mew=self.marker_lw, # ls='none', # yerr=zinv.strike_err, # ecolor=cst[ii], # capsize=self.marker_size, # elinewidth=self.marker_lw) # # stlist.append(ps1[0]) if self._plot_strike.find('p') > 0: # strike from phase tensor s2 = mt.pt.azimuth s2_err = mt.pt.azimuth_err # fold angles to go from -90 to 90 s2[np.where(s2 > 90)] -= 180 s2[np.where(s2 < -90)] += 180 # plot strike with error bars ps2 = self.axst.errorbar(mt.period, s2, marker=myx[ii % len(myx)], ms=self.marker_size, mfc=cxy[ii], mec=cxy[ii], mew=self.marker_lw, ls='none', yerr=s2_err, ecolor=cxy[ii], capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps2[0]) if self._plot_strike.find('t') > 0: # strike from tipper s3 = mt.Tipper.angle_real + 90 # fold to go from -90 to 90 s3[np.where(s3 > 90)] -= 180 s3[np.where(s3 < -90)] += 180 # plot strike with error bars ps3 = self.axst.errorbar(mt.period, s3, marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc=ctipr[ii], mec=ctipr[ii], mew=self.marker_lw, ls='none', yerr=np.zeros_like(s3), ecolor=ctipr[ii], capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps3[0]) # ------plot skew angle--------------------------------------------- if self._plot_skew == 'y': # strike from phase tensor sk = mt.pt.beta sk_err = mt.pt.beta_err ps4 = self.axsk.errorbar(mt.period, sk, marker=mxy[ii % len(mxy)], ms=self.marker_size, mfc=cxy[ii], mec=cxy[ii], mew=self.marker_lw, ls='none', yerr=sk_err, ecolor=cxy[ii], capsize=self.marker_size, elinewidth=self.marker_lw) stlist.append(ps4[0]) # ----plot phase tensor ellipse--------------------------------------- if self._plot_pt == 'y': # get phase tensor instance pt = mt.pt cmap = self.ellipse_cmap ckmin = self.ellipse_range[0] ckmax = self.ellipse_range[1] try: ckstep = float(self.ellipse_range[2]) except IndexError: ckstep = 3 if cmap == 'mt_seg_bl2wh2rd': bounds = np.arange(ckmin, ckmax + ckstep, ckstep) nseg = float((ckmax - ckmin) / (2 * ckstep)) # get the properties to color the ellipses by if self.ellipse_colorby == 'phiminang' or \ self.ellipse_colorby == 'phimin': colorarray = mt.pt.phimin elif self.ellipse_colorby == 'phidet': colorarray = 
np.sqrt(abs(mt.pt.det)) * (180 / np.pi) elif self.ellipse_colorby == 'skew' or \ self.ellipse_colorby == 'skew_seg': colorarray = mt.pt.beta elif self.ellipse_colorby == 'ellipticity': colorarray = mt.pt.ellipticity else: raise NameError(self.ellipse_colorby + ' is not supported') # -------------plot ellipses----------------------------------- for kk, ff in enumerate(mt.period): # make sure the ellipses will be visable eheight = mt.pt.phimin[kk] / mt.pt.phimax[kk] * \ self.ellipse_size ewidth = mt.pt.phimax[kk] / mt.pt.phimax[kk] * \ self.ellipse_size # create an ellipse scaled by phimin and phimax and oriented # along the azimuth which is calculated as clockwise but needs # to be plotted counter-clockwise hence the negative sign. ellipd = patches.Ellipse((np.log10(ff) * self.ellipse_spacing, ii * self.ellipse_size * 1.5), width=ewidth, height=eheight, angle=90 - pt.azimuth[kk]) self.axpt.add_patch(ellipd) # get ellipse color if cmap.find('seg') > 0: ellipd.set_facecolor(mtcl.get_plot_color(colorarray[kk], self.ellipse_colorby, cmap, ckmin, ckmax, bounds=bounds)) else: ellipd.set_facecolor(mtcl.get_plot_color(colorarray[kk], self.ellipse_colorby, cmap, ckmin, ckmax)) ellipd.set_edgecolor(cxy[ii]) # -------set axis properties--------------------------------------- self.axrxy.set_yscale('log', nonposy='clip') self.axrxy.set_xscale('log', nonposx='clip') self.axrxy.set_ylim(self.res_limits) self.axrxy.set_xlim(self.x_limits) self.axrxy.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # make a text label in upper left hand corner # label the plot with a text box if self.text_location is None: txloc = self.xlimits[0] * self.text_xpad tyloc = self.axrxy.get_ylim()[1] * self.text_ypad else: txloc = self.text_location[0] tyloc = self.text_location[1] self.text = self.axrxy.text(txloc, tyloc, '$Z_{xy}$', fontdict={'size': self.text_size, 'weight': self.text_weight}, verticalalignment='top', horizontalalignment='left', bbox={'facecolor': 'white', 'alpha': 1}) plt.setp(self.axrxy.get_xticklabels(), visible=False) self.axrxy.set_ylabel('App. 
Resistivity($\Omega \cdot$m)', fontdict=fontdict) self.axryx.set_yscale('log', nonposy='clip') self.axryx.set_xscale('log', nonposx='clip') self.axryx.set_ylim(self.res_limits) self.axryx.set_xlim(self.x_limits) self.axryx.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) self.text = self.axryx.text(txloc, tyloc, '$Z_{yx}$', fontdict={'size': self.text_size, 'weight': self.text_weight}, verticalalignment='top', horizontalalignment='left', bbox={'facecolor': 'white', 'alpha': 1}) plt.setp(self.axryx.get_xticklabels(), visible=False) plt.setp(self.axryx.get_yticklabels(), visible=False) # check the phase to see if any point are outside of [0:90] if self.phase_limits is None: self.phase_limits = (0, 89.99) # --> set axes properties self.axpxy.set_xlabel('Period(s)', fontdict=fontdict) self.axpxy.set_ylabel('Phase(deg)', fontdict=fontdict) self.axpxy.set_xscale('log', nonposx='clip') self.axpxy.set_ylim(self.phase_limits) self.axpxy.yaxis.set_major_locator(MultipleLocator(15)) self.axpxy.yaxis.set_minor_locator(MultipleLocator(5)) self.axpxy.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) if len(list(pdict.keys())) > 2: plt.setp(self.axpxy.xaxis.get_ticklabels(), visible=False) self.axpxy.set_xlabel('') self.axpyx.set_xlabel('Period(s)', fontdict=fontdict) self.axpyx.set_xscale('log', nonposx='clip') self.axpyx.set_ylim(self.phase_limits) self.axpyx.yaxis.set_major_locator(MultipleLocator(15)) self.axpyx.yaxis.set_minor_locator(MultipleLocator(5)) self.axpyx.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) plt.setp(self.axpyx.yaxis.get_ticklabels(), visible=False) if len(list(pdict.keys())) > 2: plt.setp(self.axpyx.xaxis.get_ticklabels(), visible=False) self.axpyx.set_xlabel('') # make legend if self.plot_num == 1: self.axrxy.legend(legendlistxy, stationlist, loc=3, ncol=2, markerscale=.75, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.25) self.axryx.legend(legendlistyx, stationlist, loc=3, ncol=2, markerscale=.75, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.25) elif self.plot_num == 3: llist = [ll[0] for ll in legendlistxy] slist = [ss + '_det' for ss in stationlist] self.axrxy.legend(llist, slist, loc=3, markerscale=.75, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.25) self.axryx.legend(llist, slist, loc=3, markerscale=.75, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.25) if self.plot_num == 2: # --> set axes properties for resxx self.axrxy.set_yscale('log', nonposy='clip') self.axrxy.set_xscale('log', nonposx='clip') self.axrxy.set_xlim(self.x_limits) self.axrxy.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) plt.setp(self.axrxy.get_xticklabels(), visible=False) # --> set axes properties for resyy self.axryx.set_yscale('log', nonposy='clip') self.axryx.set_xscale('log', nonposx='clip') self.axryx.set_xlim(self.x_limits) self.axryx.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) plt.setp(self.axryx.get_xticklabels(), visible=False) # --> set axes properties Phasexx self.axpxy.set_xlabel('Period(s)', fontdict) self.axpxy.set_xscale('log', nonposx='clip') self.axpxy.set_ylim(ymin=-179.9, ymax=179.9) self.axpxy.yaxis.set_major_locator(MultipleLocator(30)) self.axpxy.yaxis.set_minor_locator(MultipleLocator(5)) self.axpxy.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # --> set axes properties Phaseyy self.axpyx.set_xlabel('Period(s)', fontdict) self.axpyx.set_xscale('log', nonposx='clip') 
self.axpyx.set_ylim(ymin=-179.9, ymax=179.9) self.axpyx.yaxis.set_major_locator(MultipleLocator(30)) self.axpyx.yaxis.set_minor_locator(MultipleLocator(5)) self.axpyx.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) if len(list(pdict.keys())) > 3: plt.setp(self.axpxy.xaxis.get_ticklabels(), visible=False) self.axpxy.set_xlabel('') plt.setp(self.axpyx.xaxis.get_ticklabels(), visible=False) self.axpyx.set_xlabel('') if self._plot_tipper.find('y') == 0: self.axt.plot(self.axt.get_xlim(), [0, 0], color='k', lw=.5) # --> set axis properties Tipper if self.plot_num == 2: plt.setp(self.axpxy.get_xticklabels(), visible=False) self.axpxy.set_xlabel('') plt.setp(self.axpyx.get_xticklabels(), visible=False) self.axpyx.set_xlabel('') self.axt.yaxis.set_major_locator(MultipleLocator(.2)) self.axt.yaxis.set_minor_locator(MultipleLocator(.1)) self.axt.set_xlabel('Period(s)', fontdict=fontdict) self.axt.set_ylabel('Tipper', fontdict=fontdict) self.axt.set_xlim(np.log10(self.xlimits[0]), np.log10(self.xlimits[1])) tklabels = [] xticks = [] for tk in self.axt.get_xticks(): try: tklabels.append(mtpl.labeldict[tk]) xticks.append(tk) except KeyError: pass self.axt.set_xticks(xticks) self.axt.set_xticklabels(tklabels, fontdict={'size': self.font_size}) self.axt.set_ylim(self.tipper_limits) self.axt.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) self.axt.legend(tiplist, stationlist, loc=3, ncol=2, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) # need to reset the xlimits caouse they get reset when calling # set_ticks for some reason self.axt.set_xlim(np.log10(self.xlimits[0]), np.log10(self.xlimits[1])) if pdict['tip'] != nrows - 1: plt.setp(self.axt.xaxis.get_ticklabels(), visible=False) self.axt.set_xlabel(' ') # --> set axes properties for strike and skew if self._plot_strike[0] == 'y': if self.strike_limits is None: self.strike_limits = (-89.99, 89.99) self.axst.plot(self.axrxy.get_xlim(), [0, 0], color='k', lw=.5) self.axst.set_ylabel('Strike(deg)', fontdict=fontdict) self.axst.set_xlabel('Period(s)', fontdict=fontdict) self.axst.set_ylim(self.strike_limits) self.axst.yaxis.set_major_locator(MultipleLocator(30)) self.axst.yaxis.set_minor_locator(MultipleLocator(5)) self.axst.set_xscale('log', nonposx='clip') self.axst.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # self.axst.legend(stlist, # stationlist, # loc=3, # ncol=2, # markerscale=1, # borderaxespad=.01, # labelspacing=.07, # handletextpad=.2, # borderpad=.02) if pdict['strike'] != nrows - 1: plt.setp(self.axst.xaxis.get_ticklabels(), visible=False) self.axst.set_xlabel(' ') # --> set axes properties for skew if self._plot_skew == 'y': self.axsk.set_ylim(self.skew_limits) self.axsk.yaxis.set_major_locator(MultipleLocator(3)) self.axsk.yaxis.set_minor_locator(MultipleLocator(1)) self.axsk.set_ylabel('Skew(deg)', fontdict) self.axsk.set_xlabel('Period(s)', fontdict=fontdict) self.axsk.set_xscale('log', nonposx='clip') self.axsk.grid(True, alpha=.25, which='both', color=(.25, .25, .25), lw=.25) # self.axsk.legend(sklist, # stationlist, # loc=4, # ncol=2, # markerscale=1, # borderaxespad=.01, # labelspacing=.07, # handletextpad=.2, # borderpad=.02) if pdict['skew'] != nrows - 1: plt.setp(self.axsk.xaxis.get_ticklabels(), visible=False) self.axsk.set_xlabel(' ') # ----set axes properties for pt----------------------------------- if self._plot_pt == 'y': self.axpt.set_xlim(np.floor(np.log10(self.xlimits[0])) * \ self.ellipse_spacing, 
np.ceil(np.log10(self.xlimits[1])) * \ self.ellipse_spacing) tklabels = [] xticks = [] for tk in self.axpt.get_xticks(): try: tklabels.append(mtpl.labeldict[tk / self.ellipse_spacing]) xticks.append(tk) except KeyError: pass self.axpt.set_xticks(xticks) self.axpt.set_xticklabels(tklabels, fontdict={'size': self.font_size}) self.axpt.set_xlabel('Period (s)', fontdict=fontdict) self.axpt.set_ylim(ymin=-1.5 * self.ellipse_size, ymax=1.5 * self.ellipse_size * (ii + 1)) self.axpt.grid(True, alpha=.25, which='major', color=(.25, .25, .25), lw=.25) plt.setp(self.axpt.get_yticklabels(), visible=False) if pdict['pt'] != nrows - 1: plt.setp(self.axpt.get_xticklabels(), visible=False) # add colorbar for PT axpos = self.axpt.get_position() cb_position = (axpos.bounds[0] - .0575, axpos.bounds[1] + .02, .01, axpos.bounds[3] * .75) self.cbax = self.fig.add_axes(cb_position) if self.ellipse_cmap == 'mt_seg_bl2wh2rd': # make a color list clist = [(cc, cc, 1) for cc in np.arange(0, 1 + 1. / (nseg), 1. / (nseg))] + \ [(1, cc, cc) for cc in np.arange(1, -1. / (nseg), -1. / (nseg))] # make segmented colormap mt_seg_bl2wh2rd = colors.ListedColormap(clist) # make bounds so that the middle is white bounds = np.arange(ckmin - ckstep, ckmax + 2 * ckstep, ckstep) # normalize the colors norms = colors.BoundaryNorm(bounds, mt_seg_bl2wh2rd.N) # make the colorbar self.cbpt = mcb.ColorbarBase(self.cbax, cmap=mt_seg_bl2wh2rd, norm=norms, orientation='vertical', ticks=bounds[1:-1]) else: self.cbpt = mcb.ColorbarBase(self.cbax, cmap=mtcl.cmapdict[cmap], norm=colors.Normalize(vmin=ckmin, vmax=ckmax), orientation='vertical') self.cbpt.set_ticks([ckmin, (ckmax - ckmin) / 2, ckmax]) self.cbpt.set_ticklabels(['{0:.0f}'.format(ckmin), '{0:.0f}'.format((ckmax - ckmin) / 2), '{0:.0f}'.format(ckmax)]) self.cbpt.ax.yaxis.set_label_position('left') self.cbpt.ax.yaxis.set_label_coords(-1.05, .5) self.cbpt.ax.yaxis.tick_right() self.cbpt.ax.tick_params(axis='y', direction='in') self.cbpt.set_label(mtpl.ckdict[self.ellipse_colorby], fontdict={'size': self.font_size}) if pdict['pt'] != nrows - 1: plt.setp(self.axpt.xaxis.get_ticklabels(), visible=False) self.axpt.set_xlabel(' ') if show: plt.show() def update_plot(self): """ update any parameters that where changed using the built-in draw from canvas. Use this if you change an of the .fig or axes properties :Example: :: >>> # to change the grid lines to only be on the major ticks >>> import mtpy.imaging.mtplottools as mtplot >>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi') >>> [ax.grid(True, which='major') for ax in [p1.axr,p1.axp]] >>> p1.update_plot() """ self.fig.canvas.draw() def redraw_plot(self): """ use this function if you updated some attributes and want to re-plot. :Example: :: >>> # change the color and marker of the xy components >>> import mtpy.imaging.mtplottools as mtplot >>> p1 = mtplot.PlotResPhase(r'/home/MT/mt01.edi') >>> p1.xy_color = (.5,.5,.9) >>> p1.xy_marker = '*' >>> p1.redraw_plot() """ plt.close('all') self.plot() def __str__(self): """ rewrite the string builtin to give a useful message """ return "Plots resistivity and phase for the different modes of the MT \n" + \ "response for multiple sites. At the moment it supports the \n" + \ "input of an .edi file. Other formats that will be supported\n" + \ "are the impedance tensor and errors with an array of periods\n" + \ "and .j format.\n"
gpl-3.0
maheshakya/scikit-learn
sklearn/feature_selection/__init__.py
244
1088
""" The :mod:`sklearn.feature_selection` module implements feature selection algorithms. It currently includes univariate filter selection methods and the recursive feature elimination algorithm. """ from .univariate_selection import chi2 from .univariate_selection import f_classif from .univariate_selection import f_oneway from .univariate_selection import f_regression from .univariate_selection import SelectPercentile from .univariate_selection import SelectKBest from .univariate_selection import SelectFpr from .univariate_selection import SelectFdr from .univariate_selection import SelectFwe from .univariate_selection import GenericUnivariateSelect from .variance_threshold import VarianceThreshold from .rfe import RFE from .rfe import RFECV __all__ = ['GenericUnivariateSelect', 'RFE', 'RFECV', 'SelectFdr', 'SelectFpr', 'SelectFwe', 'SelectKBest', 'SelectPercentile', 'VarianceThreshold', 'chi2', 'f_classif', 'f_oneway', 'f_regression']
bsd-3-clause
seansu4you87/kupo
projects/MOOCs/udacity/ud120-ml/projects/final_project/poi_id.py
9
2364
#!/usr/bin/python import sys import pickle sys.path.append("../tools/") from feature_format import featureFormat, targetFeatureSplit from tester import dump_classifier_and_data ### Task 1: Select what features you'll use. ### features_list is a list of strings, each of which is a feature name. ### The first feature must be "poi". features_list = ['poi','salary'] # You will need to use more features ### Load the dictionary containing the dataset with open("final_project_dataset.pkl", "r") as data_file: data_dict = pickle.load(data_file) ### Task 2: Remove outliers ### Task 3: Create new feature(s) ### Store to my_dataset for easy export below. my_dataset = data_dict ### Extract features and labels from dataset for local testing data = featureFormat(my_dataset, features_list, sort_keys = True) labels, features = targetFeatureSplit(data) ### Task 4: Try a varity of classifiers ### Please name your classifier clf for easy export below. ### Note that if you want to do PCA or other multi-stage operations, ### you'll need to use Pipelines. For more info: ### http://scikit-learn.org/stable/modules/pipeline.html # Provided to give you a starting point. Try a variety of classifiers. from sklearn.naive_bayes import GaussianNB clf = GaussianNB() ### Task 5: Tune your classifier to achieve better than .3 precision and recall ### using our testing script. Check the tester.py script in the final project ### folder for details on the evaluation method, especially the test_classifier ### function. Because of the small size of the dataset, the script uses ### stratified shuffle split cross validation. For more info: ### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html # Example starting point. Try investigating other evaluation techniques! from sklearn.cross_validation import train_test_split features_train, features_test, labels_train, labels_test = \ train_test_split(features, labels, test_size=0.3, random_state=42) ### Task 6: Dump your classifier, dataset, and features_list so anyone can ### check your results. You do not need to change anything below, but make sure ### that the version of poi_id.py that you submit can be run on its own and ### generates the necessary .pkl files for validating your results. dump_classifier_and_data(clf, my_dataset, features_list)
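A hedged sketch of the tuning step described in Task 5, using the same legacy sklearn.cross_validation / grid_search modules the script already imports; the synthetic data, the DecisionTreeClassifier and the parameter grid are stand-ins for illustration, not the project's intended solution.

import numpy as np
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

# synthetic stand-in for the features/labels built above
rng = np.random.RandomState(42)
X_demo = rng.randn(100, 3)
y_demo = (X_demo[:, 0] + 0.5 * rng.randn(100) > 0).astype(int)

# stratified shuffle split, the scheme the evaluation script relies on
cv = StratifiedShuffleSplit(y_demo, n_iter=10, test_size=0.3, random_state=42)
grid = GridSearchCV(DecisionTreeClassifier(random_state=42),
                    param_grid={'max_depth': [None, 3, 5],
                                'min_samples_split': [2, 10, 20]},
                    scoring='f1',
                    cv=cv)
grid.fit(X_demo, y_demo)
print(grid.best_params_, grid.best_score_)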
mit
aetilley/scikit-learn
sklearn/tree/tests/test_export.py
76
9318
""" Testing for export functions of decision trees (sklearn.tree.export). """ from numpy.testing import assert_equal from nose.tools import assert_raises from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.tree import export_graphviz from sklearn.externals.six import StringIO # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]] w = [1, 1, 1, .5, .5, .5] def test_graphviz_toy(): # Check correctness of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1, criterion="gini", random_state=2) clf.fit(X, y) # Test export code out = StringIO() export_graphviz(clf, out_file=out) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with feature_names out = StringIO() export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test with class_names out = StringIO() export_graphviz(clf, out_file=out, class_names=["yes", "no"]) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = yes"] ;\n' \ '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \ 'class = yes"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \ 'class = no"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test plot_options out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False, proportion=True, special_characters=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'edge [fontname=helvetica] ;\n' \ '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \ 'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \ '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \ 'fillcolor="#399de5ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, class_names=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box] ;\n' \ '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \ 'value = [3, 3]\\nclass = y[0]"] ;\n' \ '1 
[label="(...)"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test max_depth with plot_options out = StringIO() export_graphviz(clf, out_file=out, max_depth=0, filled=True, node_ids=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \ 'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \ '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 1 ;\n' \ '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \ '0 -> 2 ;\n' \ '}' assert_equal(contents1, contents2) # Test multi-output with weighted samples clf = DecisionTreeClassifier(max_depth=2, min_samples_split=1, criterion="gini", random_state=2) clf = clf.fit(X, y2, sample_weight=w) out = StringIO() export_graphviz(clf, out_file=out, filled=True, impurity=False) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled", color="black"] ;\n' \ '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \ 'value = [[3.0, 1.5, 0.0]\\n' \ '[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \ '1 [label="X[1] <= -1.5\\nsamples = 3\\n' \ 'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="True"] ;\n' \ '2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \ '[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \ '1 -> 2 ;\n' \ '3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \ '[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \ '1 -> 3 ;\n' \ '4 [label="X[0] <= 1.5\\nsamples = 3\\n' \ 'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \ 'fillcolor="#e5813965"] ;\n' \ '0 -> 4 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="False"] ;\n' \ '5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \ '[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \ '4 -> 5 ;\n' \ '6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \ '[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \ '4 -> 6 ;\n' \ '}' assert_equal(contents1, contents2) # Test regression output with plot_options clf = DecisionTreeRegressor(max_depth=3, min_samples_split=1, criterion="mse", random_state=2) clf.fit(X, y) out = StringIO() export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True, rotate=True, rounded=True) contents1 = out.getvalue() contents2 = 'digraph Tree {\n' \ 'node [shape=box, style="filled, rounded", color="black", ' \ 'fontname=helvetica] ;\n' \ 'graph [ranksep=equally, splines=polyline] ;\n' \ 'edge [fontname=helvetica] ;\n' \ 'rankdir=LR ;\n' \ '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \ 'value = 0.0", fillcolor="#e581397f"] ;\n' \ '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \ 'fillcolor="#e5813900"] ;\n' \ '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \ 'headlabel="True"] ;\n' \ '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \ 'fillcolor="#e58139ff"] ;\n' \ '0 -> 2 [labeldistance=2.5, labelangle=45, ' \ 'headlabel="False"] ;\n' \ '{rank=same ; 0} ;\n' \ '{rank=same ; 1; 2} ;\n' \ '}' assert_equal(contents1, contents2) def test_graphviz_errors(): # Check for errors of export_graphviz clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1) clf.fit(X, y) # Check feature_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, feature_names=[]) # Check class_names error out = StringIO() assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
bsd-3-clause
danche354/Sequence-Labeling
chunk/senna-raw-hash-pos-128-64.py
1
7266
from keras.models import Model from keras.layers import Input, Masking, Dense, LSTM from keras.layers import Dropout, TimeDistributed, Bidirectional, merge from keras.layers.embeddings import Embedding from keras.utils import np_utils import numpy as np import pandas as pd import sys import math import os from datetime import datetime # add path sys.path.append('../') sys.path.append('../tools') from tools import conf from tools import load_data from tools import prepare from tools import plot np.random.seed(0) # train hyperparameters step_length = conf.chunk_step_length pos_length = conf.chunk_pos_length feature_length = conf.chunk_feature_length emb_vocab = conf.senna_vocab emb_length = conf.senna_length hash_vocab = conf.chunk_hash_vocab hash_length = conf.chunk_hash_length output_length = conf.chunk_NP_length split_rate = conf.chunk_split_rate batch_size = conf.batch_size nb_epoch = conf.nb_epoch model_name = os.path.basename(__file__)[:-3] folder_path = 'model/%s'%model_name if not os.path.isdir(folder_path): os.makedirs(folder_path) # the data, shuffled and split between train and test sets train_data, dev_data = load_data.load_chunk(dataset='train.txt', split_rate=split_rate) train_samples = len(train_data) dev_samples = len(dev_data) print('train shape:', train_samples) print('dev shape:', dev_samples) print() word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None) word_embedding = word_embedding.values word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))]) # hash_embedding = pd.read_csv('../preprocessing/chunk-auto-encoder/auto-encoder-embeddings.txt', delimiter=' ', header=None) # hash_embedding = hash_embedding.values # hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.randn(1,hash_length)]) embed_index_input = Input(shape=(step_length,)) embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input) hash_representation_input = Input(shape=(step_length,feature_length)) # encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input) pos_input = Input(shape=(step_length, pos_length)) senna_hash_pos_merge = merge([embedding, hash_representation_input, pos_input], mode='concat') input_mask = Masking(mask_value=0)(senna_hash_pos_merge) dp_1 = Dropout(0.5)(input_mask) hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1) hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1) dp_2 = Dropout(0.5)(hidden_2) output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2) model = Model(input=[embed_index_input,hash_representation_input,pos_input], output=output) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) print(model.summary()) number_of_train_batches = int(math.ceil(float(train_samples)/batch_size)) number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size)) print('start train %s ...\n'%model_name) best_accuracy = 0 best_epoch = 0 all_train_loss = [] all_dev_loss = [] all_dev_accuracy = [] log = open('%s/model_log.txt'%folder_path, 'w') start_time = datetime.now() print('train start at %s\n'%str(start_time)) log.write('train start at %s\n\n'%str(start_time)) for epoch in range(nb_epoch): start = datetime.now() print('-'*60) print('epoch %d start at %s'%(epoch, str(start))) log.write('-'*60+'\n') log.write('epoch %d 
start at %s\n'%(epoch, str(start))) train_loss = 0 dev_loss = 0 np.random.shuffle(train_data) for i in range(number_of_train_batches): train_batch = train_data[i*batch_size: (i+1)*batch_size] embed_index, hash_repesentation, pos, label, length, sentence = prepare.prepare_chunk_raw(batch=train_batch) hash_repesentation = [each.toarray() for each in hash_repesentation] hash_repesentation = np.array([np.concatenate([h, np.zeros((step_length-length[l], feature_length))]) for l, h in enumerate(hash_repesentation)]) pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)]) y = np.array([np_utils.to_categorical(each, output_length) for each in label]) train_metrics = model.train_on_batch([embed_index, hash_repesentation, pos], y) train_loss += train_metrics[0] all_train_loss.append(train_loss) correct_predict = 0 all_predict = 0 for j in range(number_of_dev_batches): dev_batch = dev_data[j*batch_size: (j+1)*batch_size] embed_index, hash_repesentation, pos, label, length, sentence = prepare.prepare_chunk_raw(batch=dev_batch) hash_repesentation = np.array([each.toarray() for each in hash_repesentation]) hash_repesentation = np.array([np.concatenate([h, np.zeros((step_length-length[l], feature_length))]) for l, h in enumerate(hash_repesentation)]) pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)]) y = np.array([np_utils.to_categorical(each, output_length) for each in label]) # for loss dev_metrics = model.test_on_batch([embed_index, hash_repesentation, pos], y) dev_loss += dev_metrics[0] # for accuracy prob = model.predict_on_batch([embed_index, hash_repesentation, pos]) for i, l in enumerate(length): predict_label = np_utils.categorical_probas_to_classes(prob[i]) correct_predict += np.sum(predict_label[:l]==label[i][:l]) all_predict += np.sum(length) epcoh_accuracy = float(correct_predict)/all_predict all_dev_accuracy.append(epcoh_accuracy) all_dev_loss.append(dev_loss) if epcoh_accuracy>=best_accuracy: best_accuracy = epcoh_accuracy best_epoch = epoch end = datetime.now() model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True) print('epoch %d end at %s'%(epoch, str(end))) print('epoch %d train loss: %f'%(epoch, train_loss)) print('epoch %d dev loss: %f'%(epoch, dev_loss)) print('epoch %d dev accuracy: %f'%(epoch, epcoh_accuracy)) print('best epoch now: %d\n'%best_epoch) log.write('epoch %d end at %s\n'%(epoch, str(end))) log.write('epoch %d train loss: %f\n'%(epoch, train_loss)) log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss)) log.write('epoch %d dev accuracy: %f\n'%(epoch, epcoh_accuracy)) log.write('best epoch now: %d\n\n'%best_epoch) end_time = datetime.now() print('train end at %s\n'%str(end_time)) log.write('train end at %s\n\n'%str(end_time)) timedelta = end_time - start_time print('train cost time: %s\n'%str(timedelta)) print('best epoch last: %d\n'%best_epoch) log.write('train cost time: %s\n\n'%str(timedelta)) log.write('best epoch last: %d\n\n'%best_epoch) plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name) plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
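The batch-preparation code above pads every variable-length sentence with zero rows up to step_length so the Masking layer can skip the padding; a small NumPy-only sketch of that padding step, with made-up sizes.

import numpy as np

def pad_to_step_length(one_hot_seq, step_length):
    # one_hot_seq has shape (length, n_classes); append zero rows up to step_length
    length, n_classes = one_hot_seq.shape
    return np.concatenate([one_hot_seq, np.zeros((step_length - length, n_classes))])

seq = np.eye(5)[[0, 2, 4]]           # a 3-token sentence with 5 POS classes
padded = pad_to_step_length(seq, 7)  # shape (7, 5); the last 4 rows are all zeros
print(padded.shape)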
mit
Transkribus/TranskribusDU
TranskribusDU/graph/FeatureDefinition_PageXml_std_noText.py
1
8579
# -*- coding: utf-8 -*- """ Standard PageXml features, but using a QuantileTransformer for numerical features instead of a StandardScaler Copyright Xerox(C) 2016 JL. Meunier Developed for the EU project READ. The READ project has received funding from the European Union�s Horizon 2020 research and innovation programme under grant agreement No 674943. """ from sklearn.pipeline import Pipeline, FeatureUnion #not robust to empty arrays, so use our robust intermediary class instead #from sklearn.preprocessing import StandardScaler from .Transformer import EmptySafe_QuantileTransformer as QuantileTransformer from .Transformer import TransformerListByType from .Transformer_PageXml import NodeTransformerXYWH, NodeTransformerNeighbors, Node1HotFeatures_noText from .Transformer_PageXml import EdgeBooleanFeatures, EdgeNumericalSelector_noText from .FeatureDefinition import FeatureDefinition class FeatureDefinition_PageXml_StandardOnes_noText(FeatureDefinition): n_QUANTILES = 16 def __init__(self): FeatureDefinition.__init__(self) # self.n_tfidf_node, self.t_ngrams_node, self.b_tfidf_node_lc = n_tfidf_node, t_ngrams_node, b_tfidf_node_lc # self.n_tfidf_edge, self.t_ngrams_edge, self.b_tfidf_edge_lc = n_tfidf_edge, t_ngrams_edge, b_tfidf_edge_lc # tdifNodeTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc, max_features=self.n_tfidf_node # , analyzer = 'char', ngram_range=self.t_ngrams_node #(2,6) # , dtype=np.float64) node_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!! ("xywh", Pipeline([ ('selector', NodeTransformerXYWH()), #v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) , ("neighbors", Pipeline([ ('selector', NodeTransformerNeighbors()), #v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) , ("1hot", Pipeline([ ('1hot', Node1HotFeatures_noText()) #does the 1-hot encoding directly ]) ) ]) lEdgeFeature = [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!! ("boolean", Pipeline([ ('boolean', EdgeBooleanFeatures()) ]) ) , ("numerical", Pipeline([ ('selector', EdgeNumericalSelector_noText()), #v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) ] edge_transformer = FeatureUnion( lEdgeFeature ) #return _node_transformer, _edge_transformer, tdifNodeTextVectorizer self._node_transformer = node_transformer self._edge_transformer = edge_transformer self.tfidfNodeTextVectorizer = None #tdifNodeTextVectorizer # def cleanTransformers(self): # """ # the TFIDF transformers are keeping the stop words => huge pickled file!!! # # Here the fix is a bit rough. There are better ways.... # JL # """ # self._node_transformer.transformer_list[0][1].steps[1][1].stop_words_ = None #is 1st in the union... # for i in [2, 3, 4, 5, 6, 7]: # self._edge_transformer.transformer_list[i][1].steps[1][1].stop_words_ = None #are 3rd and 4th in the union.... 
# return self._node_transformer, self._edge_transformer class FeatureDefinition_T_PageXml_StandardOnes_noText_v4(FeatureDefinition): """ Multitype version: so the node_transformer actually is a list of node_transformer of length n_class the edge_transformer actually is a list of node_transformer of length n_class^2 We also inherit from FeatureDefinition_T !!! """ n_QUANTILES = 16 def __init__(self, **kwargs): FeatureDefinition.__init__(self, **kwargs) nbTypes = self._getTypeNumber(kwargs) node_transformer = TransformerListByType([ FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!! ("xywh", Pipeline([ ('selector', NodeTransformerXYWH()), #v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) , ("neighbors", Pipeline([ ('selector', NodeTransformerNeighbors()), #v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) , ("1hot", Pipeline([ ('1hot', Node1HotFeatures_noText()) #does the 1-hot encoding directly ]) ) ]) for _i in range(nbTypes) ]) edge_transformer = TransformerListByType([ FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!! ("boolean", Pipeline([ ('boolean', EdgeBooleanFeatures()) ]) ) , ("numerical", Pipeline([ ('selector', EdgeNumericalSelector_noText()), #v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling ('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling ]) ) ] ) for _i in range(nbTypes*nbTypes) ]) #return _node_transformer, _edge_transformer, tdifNodeTextVectorizer self._node_transformer = node_transformer self._edge_transformer = edge_transformer
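The docstring above explains the switch from StandardScaler to a quantile transformer for the numerical node and edge features; a minimal sketch of that scaling choice using scikit-learn's stock QuantileTransformer (the project itself wraps it as EmptySafe_QuantileTransformer, and the feature array here is made up).

import numpy as np
from sklearn.preprocessing import QuantileTransformer

# made-up numerical node features: 4 nodes x 3 columns (e.g. x, y, width)
X = np.array([[0., 0., 10.],
              [1., 2., 12.],
              [5., 1., 50.],
              [9., 9., 11.]])

# copy=False as in the pipelines above; n_quantiles lowered to match the 4 toy samples
qt = QuantileTransformer(n_quantiles=4, copy=False)
print(qt.fit_transform(X))  # each column is mapped into [0, 1] by its empirical quantiles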
bsd-3-clause
ahoyosid/scikit-learn
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
225
5719
""" ============================================ Scalability of Approximate Nearest Neighbors ============================================ This example studies the scalability profile of approximate 10-neighbors queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200`` when varying the number of samples in the dataset. The first plot demonstrates the relationship between query time and index size of LSHForest. Query time is compared with the brute force method in exact nearest neighbor search for the same index sizes. The brute force queries have a very predictable linear scalability with the index (full scan). LSHForest index have sub-linear scalability profile but can be slower for small datasets. The second plot shows the speedup when using approximate queries vs brute force exact queries. The speedup tends to increase with the dataset size but should reach a plateau typically when doing queries on datasets with millions of samples and a few hundreds of dimensions. Higher dimensional datasets tends to benefit more from LSHForest indexing. The break even point (speedup = 1) depends on the dimensionality and structure of the indexed data and the parameters of the LSHForest index. The precision of approximate queries should decrease slowly with the dataset size. The speed of the decrease depends mostly on the LSHForest parameters and the dimensionality of the data. """ from __future__ import division print(__doc__) # Authors: Maheshakya Wijewardena <[email protected]> # Olivier Grisel <[email protected]> # # License: BSD 3 clause ############################################################################### import time import numpy as np from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt # Parameters of the study n_samples_min = int(1e3) n_samples_max = int(1e5) n_features = 100 n_centers = 100 n_queries = 100 n_steps = 6 n_iter = 5 # Initialize the range of `n_samples` n_samples_values = np.logspace(np.log10(n_samples_min), np.log10(n_samples_max), n_steps).astype(np.int) # Generate some structured data rng = np.random.RandomState(42) all_data, _ = make_blobs(n_samples=n_samples_max + n_queries, n_features=n_features, centers=n_centers, shuffle=True, random_state=0) queries = all_data[:n_queries] index_data = all_data[n_queries:] # Metrics to collect for the plots average_times_exact = [] average_times_approx = [] std_times_approx = [] accuracies = [] std_accuracies = [] average_speedups = [] std_speedups = [] # Calculate the average query time for n_samples in n_samples_values: X = index_data[:n_samples] # Initialize LSHForest for queries of a single neighbor lshf = LSHForest(n_estimators=20, n_candidates=200, n_neighbors=10).fit(X) nbrs = NearestNeighbors(algorithm='brute', metric='cosine', n_neighbors=10).fit(X) time_approx = [] time_exact = [] accuracy = [] for i in range(n_iter): # pick one query at random to study query time variability in LSHForest query = queries[rng.randint(0, n_queries)] t0 = time.time() exact_neighbors = nbrs.kneighbors(query, return_distance=False) time_exact.append(time.time() - t0) t0 = time.time() approx_neighbors = lshf.kneighbors(query, return_distance=False) time_approx.append(time.time() - t0) accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean()) average_time_exact = np.mean(time_exact) average_time_approx = np.mean(time_approx) speedup = np.array(time_exact) / np.array(time_approx) 
average_speedup = np.mean(speedup) mean_accuracy = np.mean(accuracy) std_accuracy = np.std(accuracy) print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, " "accuracy: %0.2f +/-%0.2f" % (n_samples, average_time_exact, average_time_approx, average_speedup, mean_accuracy, std_accuracy)) accuracies.append(mean_accuracy) std_accuracies.append(std_accuracy) average_times_exact.append(average_time_exact) average_times_approx.append(average_time_approx) std_times_approx.append(np.std(time_approx)) average_speedups.append(average_speedup) std_speedups.append(np.std(speedup)) # Plot average query time against n_samples plt.figure() plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx, fmt='o-', c='r', label='LSHForest') plt.plot(n_samples_values, average_times_exact, c='b', label="NearestNeighbors(algorithm='brute', metric='cosine')") plt.legend(loc='upper left', fontsize='small') plt.ylim(0, None) plt.ylabel("Average query time in seconds") plt.xlabel("n_samples") plt.grid(which='both') plt.title("Impact of index size on response time for first " "nearest neighbors queries") # Plot average query speedup versus index size plt.figure() plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups, fmt='o-', c='r') plt.ylim(0, None) plt.ylabel("Average speedup") plt.xlabel("n_samples") plt.grid(which='both') plt.title("Speedup of the approximate NN queries vs brute force") # Plot average precision versus index size plt.figure() plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c') plt.ylim(0, 1.1) plt.ylabel("precision@10") plt.xlabel("n_samples") plt.grid(which='both') plt.title("precision of 10-nearest-neighbors queries with index size") plt.show()
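A stripped-down version of the timing loop above: fit both estimators once and score a single approximate query against the exact one. Note that LSHForest was deprecated and later removed from scikit-learn, so this sketch only runs on the older releases this example targets.

import numpy as np
from sklearn.neighbors import LSHForest, NearestNeighbors

rng = np.random.RandomState(42)
X = rng.randn(1000, 100)       # small made-up index, not the make_blobs data above
query = rng.randn(1, 100)

lshf = LSHForest(n_estimators=20, n_candidates=200, n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine', n_neighbors=10).fit(X)

approx = lshf.kneighbors(query, return_distance=False)
exact = nbrs.kneighbors(query, return_distance=False)
print("precision@10:", np.in1d(approx, exact).mean())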
bsd-3-clause
peter-reinholdt/MM837
Assignment_1/scripts/plot_data.py
1
1062
#!/usr/bin/env python import numpy as np import matplotlib.pyplot as plt import sys filename = sys.argv[1] stop = 20000 x = np.loadtxt(filename) plt.figure(figsize=(10,5)) plt.subplot(421) plt.plot(x[:stop,0], "k.") plt.xlabel("Timestep (x100)") plt.ylabel(r"$<p^2>$") plt.subplot(422) plt.plot(x[:stop,1]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$E$") plt.subplot(423) plt.plot(x[:stop,2]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$T$") plt.subplot(424) plt.plot(x[:stop,3]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$U$") plt.subplot(425) plt.plot(x[:stop,4]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$P$") plt.subplot(426) plt.plot(x[:stop,5]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$Q$") plt.subplot(427) plt.plot(x[:10000,0]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$<p^2>$") plt.subplot(428) plt.plot(x[:1000,0]) plt.xlabel("Timestep (x100)") plt.ylabel(r"$<p^2>$") plt.figure() plt.hist(x[10000:,0], bins=100) plt.xlabel(r"$<p^2>$") plt.ylabel("Count") plt.show() print(np.average(x[10000:,0]), np.var(x[10000:,0]))
bsd-2-clause
sgenoud/scikit-learn
examples/decomposition/plot_pca_iris.py
3
1625
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================

"""
print __doc__

# Code source: Gael Varoquaux
# License: BSD

import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D

from sklearn import decomposition
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = pl.figure(1, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

pl.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'),
              )
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=pl.cm.spectral)

x_surf = [X[:, 0].min(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

pl.show()
bsd-3-clause
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/stats/tests/common.py
9
4394
# pylint: disable-msg=W0611,W0402
# flake8: noqa

from datetime import datetime
import string

import nose

import numpy as np

from pandas import DataFrame, bdate_range
from pandas.util.testing import assert_almost_equal  # imported in other tests
import pandas.util.testing as tm

N = 100
K = 4

start = datetime(2007, 1, 1)
DATE_RANGE = bdate_range(start, periods=N)

COLS = ['Col' + c for c in string.ascii_uppercase[:K]]


def makeDataFrame():
    data = DataFrame(np.random.randn(N, K),
                     columns=COLS,
                     index=DATE_RANGE)

    return data


def getBasicDatasets():
    A = makeDataFrame()
    B = makeDataFrame()
    C = makeDataFrame()

    return A, B, C


def check_for_scipy():
    try:
        import scipy
    except ImportError:
        raise nose.SkipTest('no scipy')


def check_for_statsmodels():
    _have_statsmodels = True
    try:
        import statsmodels.api as sm
    except ImportError:
        try:
            import scikits.statsmodels.api as sm
        except ImportError:
            raise nose.SkipTest('no statsmodels')


class BaseTest(tm.TestCase):

    def setUp(self):
        check_for_scipy()
        check_for_statsmodels()

        self.A, self.B, self.C = getBasicDatasets()

        self.createData1()
        self.createData2()
        self.createData3()

    def createData1(self):
        date = datetime(2007, 1, 1)
        date2 = datetime(2007, 1, 15)
        date3 = datetime(2007, 1, 22)

        A = self.A.copy()
        B = self.B.copy()
        C = self.C.copy()

        A['ColA'][date] = np.NaN
        B['ColA'][date] = np.NaN
        C['ColA'][date] = np.NaN
        C['ColA'][date2] = np.NaN

        # truncate data to save time
        A = A[:30]
        B = B[:30]
        C = C[:30]

        self.panel_y = A
        self.panel_x = {'B': B, 'C': C}

        self.series_panel_y = A.filter(['ColA'])
        self.series_panel_x = {'B': B.filter(['ColA']),
                               'C': C.filter(['ColA'])}
        self.series_y = A['ColA']
        self.series_x = {'B': B['ColA'],
                         'C': C['ColA']}

    def createData2(self):
        y_data = [[1, np.NaN],
                  [2, 3],
                  [4, 5]]
        y_index = [datetime(2000, 1, 1),
                   datetime(2000, 1, 2),
                   datetime(2000, 1, 3)]
        y_cols = ['A', 'B']
        self.panel_y2 = DataFrame(np.array(y_data), index=y_index,
                                  columns=y_cols)

        x1_data = [[6, np.NaN],
                   [7, 8],
                   [9, 30],
                   [11, 12]]
        x1_index = [datetime(2000, 1, 1),
                    datetime(2000, 1, 2),
                    datetime(2000, 1, 3),
                    datetime(2000, 1, 4)]
        x1_cols = ['A', 'B']
        x1 = DataFrame(np.array(x1_data), index=x1_index,
                       columns=x1_cols)

        x2_data = [[13, 14, np.NaN],
                   [15, np.NaN, np.NaN],
                   [16, 17, 48],
                   [19, 20, 21],
                   [22, 23, 24]]
        x2_index = [datetime(2000, 1, 1),
                    datetime(2000, 1, 2),
                    datetime(2000, 1, 3),
                    datetime(2000, 1, 4),
                    datetime(2000, 1, 5)]
        x2_cols = ['C', 'A', 'B']
        x2 = DataFrame(np.array(x2_data), index=x2_index,
                       columns=x2_cols)

        self.panel_x2 = {'x1': x1, 'x2': x2}

    def createData3(self):
        y_data = [[1, 2],
                  [3, 4]]
        y_index = [datetime(2000, 1, 1),
                   datetime(2000, 1, 2)]
        y_cols = ['A', 'B']
        self.panel_y3 = DataFrame(np.array(y_data), index=y_index,
                                  columns=y_cols)

        x1_data = [['A', 'B'],
                   ['C', 'A']]
        x1_index = [datetime(2000, 1, 1),
                    datetime(2000, 1, 2)]
        x1_cols = ['A', 'B']
        x1 = DataFrame(np.array(x1_data), index=x1_index,
                       columns=x1_cols)

        x2_data = [['foo', 'bar'],
                   ['baz', 'foo']]
        x2_index = [datetime(2000, 1, 1),
                    datetime(2000, 1, 2)]
        x2_cols = ['A', 'B']
        x2 = DataFrame(np.array(x2_data), index=x2_index,
                       columns=x2_cols)

        self.panel_x3 = {'x1': x1, 'x2': x2}
mit
Djabbz/scikit-learn
sklearn/metrics/cluster/bicluster.py
359
2797
from __future__ import division

import numpy as np

from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array

__all__ = ["consensus_score"]


def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape."""
    check_consistent_length(*a)
    check_consistent_length(*b)
    checks = lambda x: check_array(x, ensure_2d=False)
    a_rows, a_cols = map(checks, a)
    b_rows, b_cols = map(checks, b)
    return a_rows, a_cols, b_rows, b_cols


def _jaccard(a_rows, a_cols, b_rows, b_cols):
    """Jaccard coefficient on the elements of the two biclusters."""
    intersection = ((a_rows * b_rows).sum() *
                    (a_cols * b_cols).sum())

    a_size = a_rows.sum() * a_cols.sum()
    b_size = b_rows.sum() * b_cols.sum()

    return intersection / (a_size + b_size - intersection)


def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the Jaccard coefficient of a's bicluster i and b's
    bicluster j.

    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    result = np.array(list(list(similarity(a_rows[i], a_cols[i],
                                           b_rows[j], b_cols[j])
                                for j in range(n_b))
                           for i in range(n_a)))
    return result


def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Similarity between individual biclusters is computed. Then the
    best matching between sets is found using the Hungarian algorithm.
    The final score is the sum of similarities divided by the size of
    the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------

    * Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    if similarity == "jaccard":
        similarity = _jaccard
    matrix = _pairwise_similarity(a, b, similarity)
    indices = linear_assignment(1. - matrix)
    n_a = len(a[0])
    n_b = len(b[0])
    return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
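As a quick illustration of the (rows, columns) indicator format and the matching step described in the docstring, the toy sketch below scores two sets of biclusters over a 4x4 matrix. It is not part of the module itself; it assumes the public `sklearn.metrics.consensus_score` import path.

# Usage sketch (illustration only, not part of the module above).
import numpy as np
from sklearn.metrics import consensus_score  # public import path

# Two "true" biclusters over a 4x4 matrix, as boolean indicators of shape
# (n_biclusters, n_rows) and (n_biclusters, n_columns).
a_rows = np.array([[True, True, False, False],
                   [False, False, True, True]])
a_cols = np.array([[True, True, False, False],
                   [False, False, True, True]])

# An estimate that recovers the first bicluster exactly but adds one extra
# row to the second one.
b_rows = np.array([[True, True, False, False],
                   [False, True, True, True]])
b_cols = a_cols.copy()

# Jaccard similarities of the matched pairs are 1.0 and 2/3, so the
# consensus score should be (1.0 + 2/3) / 2, i.e. about 0.83.
print(consensus_score((a_rows, a_cols), (b_rows, b_cols)))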
bsd-3-clause
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/backend_gtkcairo.py
8
2374
""" GTK+ Matplotlib interface using cairo (not GDK) drawing operations. Author: Steve Chaplin """ from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six import gtk if gtk.pygtk_version < (2,7,0): import cairo.gtk from matplotlib.backends import backend_cairo from matplotlib.backends.backend_gtk import * backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \ 'Pycairo(%s)' % backend_cairo.backend_version _debug = False #_debug = True def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ if _debug: print('backend_gtkcairo.%s()' % fn_name()) FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, thisFig) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ canvas = FigureCanvasGTKCairo(figure) return FigureManagerGTK(canvas, num) class RendererGTKCairo (backend_cairo.RendererCairo): if gtk.pygtk_version >= (2,7,0): def set_pixmap (self, pixmap): self.gc.ctx = pixmap.cairo_create() else: def set_pixmap (self, pixmap): self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap) class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK): filetypes = FigureCanvasGTK.filetypes.copy() filetypes.update(backend_cairo.FigureCanvasCairo.filetypes) def _renderer_init(self): """Override to use cairo (rather than GDK) renderer""" if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name())) self._renderer = RendererGTKCairo (self.figure.dpi) class FigureManagerGTKCairo(FigureManagerGTK): def _get_toolbar(self, canvas): # must be inited after the window, drawingArea and figure # attrs are set if matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2GTKCairo (canvas, self.window) else: toolbar = None return toolbar class NavigationToolbar2Cairo(NavigationToolbar2GTK): def _get_canvas(self, fig): return FigureCanvasGTKCairo(fig) FigureCanvas = FigureCanvasGTKCairo FigureManager = FigureManagerGTKCairo
mit
HeraclesHX/scikit-learn
sklearn/utils/tests/test_linear_assignment.py
421
1349
# Author: Brian M. Clapper, G Varoquaux
# License: BSD

import numpy as np

# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian


def test_hungarian():
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),

        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]

    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        indexes = _hungarian(cost_matrix)
        total_cost = 0
        for r, c in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost

        indexes = _hungarian(cost_matrix.T)
        total_cost = 0
        for c, r in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost
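Following up on the "XXX we should be testing the public API" note, the sketch below reproduces the first expected cost with a public Hungarian-algorithm solver. It assumes SciPy >= 0.17 (`scipy.optimize.linear_sum_assignment`) is available and is an illustration, not part of the original test module.

# Cross-check sketch (assumes scipy.optimize.linear_sum_assignment exists).
import numpy as np
from scipy.optimize import linear_sum_assignment

cost_matrix = np.array([[400, 150, 400],
                        [400, 450, 600],
                        [300, 225, 300]])

row_ind, col_ind = linear_sum_assignment(cost_matrix)
total_cost = cost_matrix[row_ind, col_ind].sum()
assert total_cost == 850  # matches the expected cost used in test_hungarian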
bsd-3-clause
aewhatley/scikit-learn
examples/calibration/plot_compare_calibration.py
241
5008
""" ======================================== Comparison of Calibration of Classifiers ======================================== Well calibrated classifiers are probabilistic classifiers for which the output of the predict_proba method can be directly interpreted as a confidence level. For instance a well calibrated (binary) classifier should classify the samples such that among the samples to which it gave a predict_proba value close to 0.8, approx. 80% actually belong to the positive class. LogisticRegression returns well calibrated predictions as it directly optimizes log-loss. In contrast, the other methods return biased probilities, with different biases per method: * GaussianNaiveBayes tends to push probabilties to 0 or 1 (note the counts in the histograms). This is mainly because it makes the assumption that features are conditionally independent given the class, which is not the case in this dataset which contains 2 redundant features. * RandomForestClassifier shows the opposite behavior: the histograms show peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1 are very rare. An explanation for this is given by Niculescu-Mizil and Caruana [1]: "Methods such as bagging and random forests that average predictions from a base set of models can have difficulty making predictions near 0 and 1 because variance in the underlying base models will bias predictions that should be near zero or one away from these values. Because predictions are restricted to the interval [0,1], errors caused by variance tend to be one- sided near zero and one. For example, if a model should predict p = 0 for a case, the only way bagging can achieve this is if all bagged trees predict zero. If we add noise to the trees that bagging is averaging over, this noise will cause some trees to predict values larger than 0 for this case, thus moving the average prediction of the bagged ensemble away from 0. We observe this effect most strongly with random forests because the base-level trees trained with random forests have relatively high variance due to feature subseting." As a result, the calibration curve shows a characteristic sigmoid shape, indicating that the classifier could trust its "intuition" more and return probabilties closer to 0 or 1 typically. * Support Vector Classification (SVC) shows an even more sigmoid curve as the RandomForestClassifier, which is typical for maximum-margin methods (compare Niculescu-Mizil and Caruana [1]), which focus on hard samples that are close to the decision boundary (the support vectors). .. topic:: References: .. [1] Predicting Good Probabilities with Supervised Learning, A. Niculescu-Mizil & R. Caruana, ICML 2005 """ print(__doc__) # Author: Jan Hendrik Metzen <[email protected]> # License: BSD Style. 
import numpy as np np.random.seed(0) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.svm import LinearSVC from sklearn.calibration import calibration_curve X, y = datasets.make_classification(n_samples=100000, n_features=20, n_informative=2, n_redundant=2) train_samples = 100 # Samples used for training the models X_train = X[:train_samples] X_test = X[train_samples:] y_train = y[:train_samples] y_test = y[train_samples:] # Create classifiers lr = LogisticRegression() gnb = GaussianNB() svc = LinearSVC(C=1.0) rfc = RandomForestClassifier(n_estimators=100) ############################################################################### # Plot calibration plots plt.figure(figsize=(10, 10)) ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax2 = plt.subplot2grid((3, 1), (2, 0)) ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated") for clf, name in [(lr, 'Logistic'), (gnb, 'Naive Bayes'), (svc, 'Support Vector Classification'), (rfc, 'Random Forest')]: clf.fit(X_train, y_train) if hasattr(clf, "predict_proba"): prob_pos = clf.predict_proba(X_test)[:, 1] else: # use decision function prob_pos = clf.decision_function(X_test) prob_pos = \ (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min()) fraction_of_positives, mean_predicted_value = \ calibration_curve(y_test, prob_pos, n_bins=10) ax1.plot(mean_predicted_value, fraction_of_positives, "s-", label="%s" % (name, )) ax2.hist(prob_pos, range=(0, 1), bins=10, label=name, histtype="step", lw=2) ax1.set_ylabel("Fraction of positives") ax1.set_ylim([-0.05, 1.05]) ax1.legend(loc="lower right") ax1.set_title('Calibration plots (reliability curve)') ax2.set_xlabel("Mean predicted value") ax2.set_ylabel("Count") ax2.legend(loc="upper center", ncol=2) plt.tight_layout() plt.show()
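To make the "predict_proba value close to 0.8" definition above concrete, the short sketch below recomputes one reliability bin by hand for the fitted logistic regression model. It reuses the `lr`, `X_test` and `y_test` variables from the script and is only an illustration of the per-bin computation that `calibration_curve` performs; it is not part of the original example.

# Follow-up sketch (assumes the variables defined by the script above).
proba = lr.predict_proba(X_test)[:, 1]
in_bin = (proba >= 0.7) & (proba < 0.9)   # samples predicted around 0.8
if in_bin.any():
    observed = y_test[in_bin].mean()      # empirical fraction of positives
    predicted = proba[in_bin].mean()      # mean predicted probability
    print("mean predicted: %.2f, observed positive rate: %.2f"
          % (predicted, observed))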
bsd-3-clause
sassoftware/saspy
saspy/sasiohttp.py
1
75045
# # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import http.client as hc import base64 import json import os import ssl import sys import urllib import warnings import io import tempfile as tf from time import sleep from saspy.sasexceptions import (SASHTTPauthenticateError, SASHTTPconnectionError, SASHTTPsubmissionError) try: import pandas as pd import numpy as np except ImportError: pass class SASconfigHTTP: ''' This object is not intended to be used directly. Instantiate a SASsession object instead ''' def __init__(self, session, **kwargs): self._kernel = kwargs.get('kernel', None) SAScfg = session._sb.sascfg.SAScfg self.name = session._sb.sascfg.name cfg = getattr(SAScfg, self.name) self._token = cfg.get('authtoken', None) self.url = cfg.get('url', '') self.ip = cfg.get('ip', '') self.port = cfg.get('port', None) self.ctxname = cfg.get('context', '') self.ctx = {} self.options = cfg.get('options', []) self.ssl = cfg.get('ssl', True) self.verify = cfg.get('verify', True) self.timeout = cfg.get('timeout', None) user = cfg.get('user', '') pw = cfg.get('pw', '') client_id = cfg.get('client_id', None) client_secret = cfg.get('client_secret', '') authcode = cfg.get('authcode', None) self.encoding = cfg.get('encoding', '') self.authkey = cfg.get('authkey', '') self._prompt = session._sb.sascfg._prompt self.lrecl = cfg.get('lrecl', None) self.inactive = cfg.get('inactive', 120) try: self.outopts = getattr(SAScfg, "SAS_output_options") self.output = self.outopts.get('output', 'html5') except: self.output = 'html5' if self.output.lower() not in ['html', 'html5']: print("Invalid value specified for SAS_output_options. Using the default of HTML5") self.output = 'html5' # GET Config options try: self.cfgopts = getattr(SAScfg, "SAS_config_options") except: self.cfgopts = {} lock = self.cfgopts.get('lock_down', True) # in lock down mode, don't allow runtime overrides of option values from the config file. 
self.verbose = self.cfgopts.get('verbose', True) self.verbose = kwargs.get('verbose', self.verbose) inurl = kwargs.get('url', None) if inurl is not None: if lock and len(self.url): print("Parameter 'url' passed to SAS_session was ignored due to configuration restriction.") else: self.url = inurl inip = kwargs.get('ip', None) if inip is not None: if lock and len(self.ip): print("Parameter 'ip' passed to SAS_session was ignored due to configuration restriction.") else: self.ip = inip inport = kwargs.get('port', None) if inport is not None: if lock and self.port: print("Parameter 'port' passed to SAS_session was ignored due to configuration restriction.") else: self.port = inport inctxname = kwargs.get('context', None) if inctxname is not None: if lock and len(self.ctxname): print("Parameter 'context' passed to SAS_session was ignored due to configuration restriction.") else: self.ctxname = inctxname inoptions = kwargs.get('options', None) if inoptions is not None: if lock and len(self.options): print("Parameter 'options' passed to SAS_session was ignored due to configuration restriction.") else: self.options = inoptions inssl = kwargs.get('ssl', None) if inssl is not None: if lock and self.ssl: print("Parameter 'ssl' passed to SAS_session was ignored due to configuration restriction.") else: self.ssl = bool(inssl) inver = kwargs.get('verify', None) if inver is not None: if lock and self.verify: print("Parameter 'verify' passed to SAS_session was ignored due to configuration restriction.") else: self.verify = bool(inver) intout = kwargs.get('timeout', None) if intout is not None: if lock and self.timeout: print("Parameter 'timeout' passed to SAS_session was ignored due to configuration restriction.") else: self.timeout = intout inencoding = kwargs.get('encoding', 'NoOverride') if inencoding != 'NoOverride': if lock and len(self.encoding): print("Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.") else: self.encoding = inencoding if not self.encoding or self.encoding != 'utf_8': self.encoding = 'utf_8' inautht = kwargs.get('authtoken', None) if inautht is not None: if lock and self._token: print("Parameter 'authtoken' passed to SAS_session was ignored due to configuration restriction.") else: self._token = inautht inauthc = kwargs.get('authcode', None) if inauthc is not None: if lock and authcode: print("Parameter 'authcode' passed to SAS_session was ignored due to configuration restriction.") else: authcode = inauthc incis = kwargs.get('client_secret', None) if incis is not None: if lock and client_secret: print("Parameter 'client_secret' passed to SAS_session was ignored due to configuration restriction.") else: client_secret = incis incid = kwargs.get('client_id', None) if incid is not None: if lock and client_id: print("Parameter 'client_id' passed to SAS_session was ignored due to configuration restriction.") else: client_id = incid if client_id is None: client_id = 'SASPy' use_authcode = False else: use_authcode = True inlrecl = kwargs.get('lrecl', None) if inlrecl is not None: if lock and self.lrecl: print("Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.") else: self.lrecl = inlrecl if not self.lrecl: self.lrecl = 1048576 inito = kwargs.get('inactive', None) if inito is not None: if lock and self.inactive: print("Parameter 'inactive' passed to SAS_session was ignored due to configuration restriction.") else: self.inactive = inito inak = kwargs.get('authkey', '') if len(inak) > 0: if lock and len(self.authkey): 
print("Parameter 'authkey' passed to SAS_session was ignored due to configuration restriction.") else: self.authkey = inak if len(self.url) > 0: http = self.url.split('://') hp = http[1].split(':') if http[0].lower() in ['http', 'https']: self.ip = hp[0] self.port = hp[1] if len(hp) > 1 else self.port self.ssl = True if 's' in http[0].lower() else False else: print("Parameter 'url' not in recognized format. Expeting 'http[s]://host[:port]'. Ignoring parameter.") while len(self.ip) == 0: if not lock: self.ip = self._prompt("Please enter the host (ip address) you are trying to connect to: ") if self.ip is None: self._token = None raise RuntimeError("No IP address provided.") else: print("In lockdown mode and missing ip adress in the config named: "+cfgname ) raise RuntimeError("No IP address provided.") if not self.port: if self.ssl: self.port = 443 else: self.port = 80 if not self._token and not authcode: found = False if self.authkey: if os.name == 'nt': pwf = os.path.expanduser('~')+os.sep+'_authinfo' else: pwf = os.path.expanduser('~')+os.sep+'.authinfo' try: fid = open(pwf, mode='r') for line in fid: if line.startswith(self.authkey): user = line.partition('user')[2].lstrip().partition(' ')[0].partition('\n')[0] pw = line.partition('password')[2].lstrip().partition(' ')[0].partition('\n')[0] found = True break fid.close() except OSError as e: print('Error trying to read authinfo file:'+pwf+'\n'+str(e)) pass except: pass if not found: print('Did not find key '+self.authkey+' in authinfo file:'+pwf+'\n') inuser = kwargs.get('user', '') if len(inuser) > 0: if lock and len(user): print("Parameter 'user' passed to SAS_session was ignored due to configuration restriction.") else: user = inuser inpw = kwargs.get('pw', '') if len(inpw) > 0: if lock and len(pw): print("Parameter 'pw' passed to SAS_session was ignored due to configuration restriction.") else: pw = inpw if use_authcode: code_pw = 'authcode' else: code_pw = '' if len(user) == 0: msg = "To connect to Viya you need either an authcode or a userid/pw. Neither were provided.\n" msg += "Please enter which one you want to enter next. 
Type one of these now: [default=authcode | userid]: " while code_pw.lower() not in ['userid','authcode']: code_pw = self._prompt(msg) if code_pw == '': code_pw = 'authcode' if code_pw is None: self._token = None raise RuntimeError("Neither authcode nor userid provided.") if code_pw.lower() == 'authcode': purl = "/SASLogon/oauth/authorize?client_id={}&response_type=code".format(client_id) if len(self.url) > 0: purl = self.url+purl else: purl = "http{}://{}:{}{}".format('s' if self.ssl else '', self.ip, self.port, purl) msg = "The default url to authenticate with would be {}\n".format(purl) msg += "Please enter authcode: " authcode = self._prompt(msg) if authcode is None: self._token = None raise RuntimeError("No authcode provided.") else: while len(user) == 0: user = self._prompt("Please enter userid: ") if user is None: self._token = None raise RuntimeError("No userid provided.") while len(pw) == 0: pw = self._prompt("Please enter password: ", pw = True) if pw is None: self._token = None raise RuntimeError("No password provided.") if self.ssl: if self.verify: # handle having self signed certificate default on Viya w/out copies on client; still ssl, just not verifyable try: self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout) if not self._token: self._token = self._authenticate(user, pw, authcode, client_id, client_secret) except ssl.SSLError as e: print("SSL certificate verification failed, creating an unverified SSL connection. Error was:"+str(e)) self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context()) print("You can set 'verify=False' to get rid of this message ") if not self._token: self._token = self._authenticate(user, pw, authcode, client_id, client_secret) else: self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context()) if not self._token: self._token = self._authenticate(user, pw, authcode, client_id, client_secret) else: self.HTTPConn = hc.HTTPConnection(self.ip, self.port, timeout=self.timeout) if not self._token: self._token = self._authenticate(user, pw, authcode, client_id, client_secret) if not self._token: print("Could not acquire an Authentication Token") return # GET Contexts contexts = self._get_contexts() if contexts == None: self._token = None raise SASHTTPconnectionError(msg="No Contexts found on Compute Service at ip="+self.ip) ctxnames = [] for i in range(len(contexts)): ctxnames.append(contexts[i].get('name')) if len(ctxnames) == 0: self._token = None raise SASHTTPconnectionError(msg="No Contexts found on Compute Service at ip="+self.ip) if len(self.ctxname) == 0: if len(ctxnames) == 1: self.ctxname = ctxnames[0] print("Using SAS Context: " + self.ctxname) else: try: ctxname = self._prompt("Please enter the SAS Context you wish to run. Available contexts are: " + str(ctxnames)+" ") if ctxname is None: self._token = None raise RuntimeError("No SAS Context provided.") else: self.ctxname = ctxname except: raise SASHTTPconnectionError(msg= "SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " + str(ctxnames)+" ") while self.ctxname not in ctxnames: if not lock: ''' this was original code before compute was production. users can't create these on the fly. createctx = self._prompt( "SAS Context specified was not found. 
Do you want to create a new context named "+self.ctxname+" [Yes|No]?") if createctx.upper() in ('YES', 'Y'): contexts = self._create_context(user) else: ''' try: ctxname = self._prompt( "SAS Context specified was not found. Please enter the SAS Context you wish to run. Available contexts are: " + str(ctxnames)+" ") if ctxname is None: self._token = None raise SASHTTPconnectionError(msg= "SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " + str(ctxnames)+" ") else: self.ctxname = ctxname except: raise SASHTTPconnectionError(msg= "SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " + str(ctxnames)+" ") else: msg = "SAS Context specified in the SASconfig ("+self.ctxname+") was not found on this server, and because " msg += "the SASconfig is in lockdown mode, there is no prompting for other contexts. No connection established." print(msg) self._token = None raise RuntimeError("No SAS Context provided.") for i in range(len(contexts)): if contexts[i].get('name') == self.ctxname: self.ctx = contexts[i] break if self.ctx == {}: raise SASHTTPconnectionError(msg="No context information returned for context {}\n{}".format(self.ctxname, contexts)) return def _authenticate(self, user, pw, authcode, client_id, client_secret): #import pdb; pdb.set_trace() if authcode: uauthcode = urllib.parse.quote(authcode) uclient_id = urllib.parse.quote(client_id) uclient_secret = urllib.parse.quote(client_secret) d1 = ("grant_type=authorization_code&code="+uauthcode+"&client_id="+uclient_id+"&client_secret="+uclient_secret).encode(self.encoding) headers={"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/x-www-form-urlencoded"} else: uuser = urllib.parse.quote(user) upw = urllib.parse.quote(pw) d1 = ("grant_type=password&username="+uuser+"&password="+upw).encode(self.encoding) basic = base64.encodebytes("sas.tkmtrb:".encode(self.encoding)) authheader = '%s' % basic.splitlines()[0].decode(self.encoding) headers={"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/x-www-form-urlencoded", "Authorization":"Basic "+authheader} # POST AuthToken conn = self.HTTPConn; conn.connect() try: conn.request('POST', "/SASLogon/oauth/token", body=d1, headers=headers) req = conn.getresponse() except: #print("Failure in GET AuthToken. Could not connect to the logon service. Exception info:\n"+str(sys.exc_info())) msg="Failure in GET AuthToken. Could not connect to the logon service. Exception info:\n"+str(sys.exc_info()) raise SASHTTPauthenticateError(msg) #return None status = req.status resp = req.read() conn.close() if status > 299: #print("Failure in GET AuthToken. Status="+str(status)+"\nResponse="+resp.decode(self.encoding)) msg="Failure in GET AuthToken. Status="+str(status)+"\nResponse="+str(resp) raise SASHTTPauthenticateError(msg) #return None js = json.loads(resp.decode(self.encoding)) token = js.get('access_token') return token def _get_contexts(self): #import pdb; pdb.set_trace() # GET Contexts conn = self.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Accept-Item":"application/vnd.sas.compute.context.summary+json", "Authorization":"Bearer "+self._token} conn.request('GET', "/compute/contexts?limit=999999", headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() if status > 299: fmsg = "Failure in GET Contexts. 
Status="+str(status)+"\nResponse="+resp.decode(self.encoding) raise SASHTTPconnectionError(msg=fmsg) js = json.loads(resp.decode(self.encoding)) contexts = js.get('items') return contexts def _create_context(self, user): # GET Contexts conn = self.HTTPConn; conn.connect() d1 = '{"name": "SASPy","version": 1,"description": "SASPy Context","attributes": {"sessionInactiveTimeout": 60 },' d1 += '"launchContext": {"contextName": "'+self.ctxname+'"},"launchType": "service","authorizedUsers": ["'+user+'"]}' headers={"Accept":"application/vnd.sas.compute.context+json", "Content-Type":"application/vnd.sas.compute.context.request+json", "Authorization":"Bearer "+self._token} conn.request('POST', "/compute/contexts", body=d1, headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() if status > 299: print("Failure in POST Context. Status="+str(status)+"\nResponse="+resp.decode(self.encoding)) return None contexts = self._get_contexts() return contexts class SASsessionHTTP(): ''' The SASsession object is the main object to instantiate and provides access to the rest of the functionality. cfgname - value in SAS_config_names List of the sascfg.py file kernel - None - internal use when running the SAS_kernel notebook user - userid to use to connect to Compute Service pw - pw for the userid being used to connect to Compute Service ip - overrides IP Dict entry of cfgname in sascfg.py file port - overrides Port Dict entry of cfgname in sascfg.py file context - overrides Context Dict entry of cfgname in sascfg.py file options - overrides Options Dict entry of cfgname in sascfg.py file encoding - This is the python encoding value that matches the SAS session encoding of the Compute Server you are connecting to ''' #def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, user: str ='', pw: str ='', # ip: str ='', port: int ='', context: str ='', options: list =[]) -> '<SASsession object>': def __init__(self, **kwargs): self.pid = None self._session = None self._sb = kwargs.get('sb', None) self._log = "\nNo SAS session established, something must have failed trying to connect\n" self.sascfg = SASconfigHTTP(self, **kwargs) if self.sascfg._token: self._startsas() else: None def __del__(self): if self._session: self._endsas() self._sb.SASpid = None return def _startsas(self): if self.pid: return self.pid if len(self.sascfg.options): options = '['; for opt in self.sascfg.options: options += '"'+opt+'", ' options = (options.rpartition(','))[0]+']' else: options = '[]' # POST Session uri = None for ld in self.sascfg.ctx.get('links'): if ld.get('method') == 'POST': uri = ld.get('uri') break if not uri: raise SASHTTPconnectionError(msg= "POST uri not found in context info. You may not have permission to use this context.\n{}".format(self.sascfg.ctx)) conn = self.sascfg.HTTPConn; conn.connect() d1 = '{"name":"'+self.sascfg.ctxname+'", "description":"saspy session", "version":1, "environment":{"options":'+options+'}' d1 += ',"attributes": {"sessionInactiveTimeout": '+str(int(float(self.sascfg.inactive)*60))+'}}' headers={"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/vnd.sas.compute.session.request+json", "Authorization":"Bearer "+self.sascfg._token} try: conn.request('POST', uri, body=d1, headers=headers) req = conn.getresponse() except: #print("Could not acquire a SAS Session for context: "+self.sascfg.ctxname) raise SASHTTPconnectionError(msg="Could not acquire a SAS Session for context: "+self.sascfg.ctxname+". 
Exception info:\n"+str(sys.exc_info())) #return None status = req.status resp = req.read() conn.close() if status > 299: #print("Failure in POST Session \n"+resp.decode(self.sascfg.encoding)) #print("Could not acquire a SAS Session for context: "+self.sascfg.ctxname) msg="Could not acquire a SAS Session for context: "+self.sascfg.ctxname+". Exception info:\nStatus="+str(status)+"\nResponse="+str(resp) raise SASHTTPconnectionError(msg) #return None self._session = json.loads(resp.decode(self.sascfg.encoding)) if self._session == None: print("Could not acquire a SAS Session for context: "+self.sascfg.ctxname) return None #GET Session uri's once for ld in self._session.get('links'): if ld.get('method') == 'GET' and ld.get('rel') == 'log': self._uri_log = ld.get('uri') elif ld.get('method') == 'GET' and ld.get('rel') == 'listing': self._uri_lst = ld.get('uri') elif ld.get('method') == 'GET' and ld.get('rel') == 'results': self._uri_ods = ld.get('uri') elif ld.get('method') == 'GET' and ld.get('rel') == 'state': self._uri_state = ld.get('uri') elif ld.get('method') == 'POST' and ld.get('rel') == 'execute': self._uri_exe = ld.get('uri') elif ld.get('method') == 'PUT' and ld.get('rel') == 'cancel': self._uri_can = ld.get('uri') elif ld.get('method') == 'DELETE' and ld.get('rel') == 'delete': self._uri_del = ld.get('uri') elif ld.get('method') == 'GET' and ld.get('rel') == 'files': self._uri_files = ld.get('uri') self.pid = self._session.get('id') self._log = self._getlog() # POST Job - Lets see if the server really came up, cuz you can't tell from what happend so far conn = self.sascfg.HTTPConn; conn.connect() jcode = json.dumps('\n') d1 = '{"code":['+jcode+']}' headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('POST', self._uri_exe, body=d1, headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() try: jobid = json.loads(resp.decode(self.sascfg.encoding)) except: jobid = None if not jobid or status > 299: print("Compute server had issues starting:\n") for key in jobid: print(key+"="+str(jobid.get(key))) return None ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;", "text") if self.sascfg.verbose: print("SAS server started using Context "+self.sascfg.ctxname+" with SESSION_ID="+self.pid) return self.pid def _endsas(self): rc = 0 if self._session: # DELETE Session conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/json","Authorization":"Bearer "+self.sascfg._token} conn.request('DELETE', self._uri_del, headers=headers) req = conn.getresponse() resp = req.read() conn.close() if self.sascfg.verbose: print("SAS server terminated for SESSION_ID="+self._session.get('id')) self._session = None self.pid = None self._sb.SASpid = None return rc def _getlog(self, jobid=None): start = 0 logr = '' # GET Log if jobid: for ld in jobid.get('links'): if ld.get('method') == 'GET' and ld.get('rel') == 'log': uri = ld.get('uri') break else: uri = self._uri_log while True: # GET Log conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', uri+"?start="+str(start)+"&limit="+str(start+1000), headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() try: js = json.loads(resp.decode(self.sascfg.encoding)) log = js.get('items') 
lines = len(log) except: lines = None if not lines: break start += lines for line in log: logr += line.get('line')+'\n' if jobid != None: self._log += logr.replace(chr(12), chr(10)) if logr.count('ERROR:') > 0: warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem") self._sb.check_error_log = True return logr def _getlst(self, jobid=None): htm = '' i = 0 # GET the list of results if jobid: for ld in jobid.get('links'): if ld.get('method') == 'GET' and ld.get('rel') == 'results': uri = ld.get('uri')+"?includeTypes=ODS" break else: uri = self._uri_lst conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', uri, headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() try: js = json.loads(resp.decode(self.sascfg.encoding)) results = js.get('items') except: results = [] conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} while i < len(results): # GET an ODS Result if results[i].get('type') == 'ODS' and len(results[i].get('links')) > 0: conn.request('GET', results[i].get('links')[0].get('href'), headers=headers) req = conn.getresponse() status = req.status resp = req.read() htm += resp.decode(self.sascfg.encoding) i += 1 conn.close() lstd = htm.replace(chr(12), chr(10)).replace('<body class="c body">', '<body class="l body">').replace("font-size: x-small;", "font-size: normal;") return lstd def _getlsttxt(self, jobid=None): start = 0 lstr = '' # GET Log if jobid: for ld in jobid.get('links'): if ld.get('method') == 'GET' and ld.get('rel') == 'listing': uri = ld.get('uri') break else: uri = self._uri_lst while True: conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', uri+"?start="+str(start)+"&limit="+str(start+1000), headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() try: js = json.loads(resp.decode(self.sascfg.encoding)) lst = js.get('items') lines = len(lst) except: lines = None if not lines: break start += lines for line in lst: lstr += line.get('line')+'\n' return lstr def _asubmit(self, code, results="html"): #odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\n") #odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\n") #odsclose = json.dumps("ods html5 (id=saspy_internal) close;ods listing;\n") odsopen = json.dumps("ods listing close;ods "+self.sascfg.output+" (id=saspy_internal) options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style+"; ods graphics on / outputfmt=png;\n") odsclose = json.dumps("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing;\n") ods = True; if results.upper() != "HTML": ods = False odsopen = '""' odsclose = '""' # POST Job conn = self.sascfg.HTTPConn; conn.connect() jcode = json.dumps(code) d1 = '{"code":['+odsopen+','+jcode+','+odsclose+']}' headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('POST', self._uri_exe, body=d1, headers=headers) req = conn.getresponse() resp = req.read() conn.close() jobid = 
json.loads(resp.decode(self.sascfg.encoding)) return jobid def submit(self, code: str, results: str ="html", prompt: dict = None, **kwargs) -> dict: ''' code - the SAS statements you want to execute results - format of results, HTML is default, TEXT is the alternative prompt - dict of names:flags to prompt for; create marco variables (used in submitted code), then keep or delete The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete the macros, or show what you type and keep the macros (they will still be available later) for example (what you type for pw will not be displayed, user and dsname will): results = sas.submit( """ libname tera teradata server=teracop1 user=&user pw=&pw; proc print data=tera.&dsname (obs=10); run; """ , prompt = {'user': False, 'pw': True, 'dsname': False} ) Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT) NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print() i.e,: results = sas.submit("data a; x=1; run; proc print;run') print(results['LOG']) HTML(results['LST']) ''' prompt = prompt if prompt is not None else {} printto = kwargs.pop('undo', False) #odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\n") #odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\n") #odsclose = json.dumps("ods html5 (id=saspy_internal) close;ods listing;\n") odsopen = json.dumps("ods listing close;ods "+self.sascfg.output+" (id=saspy_internal) options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style+"; ods graphics on / outputfmt=png;\n") odsclose = json.dumps("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing;\n") ods = True; pcodei = '' pcodeiv = '' pcodeo = '' if self._session == None: print("No SAS process attached. SAS process has terminated unexpectedly.") return dict(LOG="No SAS process attached. 
SAS process has terminated unexpectedly.", LST='') if results.upper() != "HTML": ods = False odsopen = '""' odsclose = '""' if len(prompt): pcodei += 'options nosource nonotes;\n' pcodeo += 'options nosource nonotes;\n' for key in prompt: gotit = False while not gotit: var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key]) if var is None: raise RuntimeError("No value for prompted macro variable provided.") if len(var) > 0: gotit = True else: print("Sorry, didn't get a value for that variable.") if prompt[key]: pcodei += '%let '+key+'='+var+';\n' else: pcodeiv += '%let '+key+'='+var+';\n' if prompt[key]: pcodeo += '%symdel '+key+';\n' pcodei += 'options source notes;\n' pcodeo += 'options source notes;\n' # POST Job conn = self.sascfg.HTTPConn; conn.connect() jcode = json.dumps(pcodei+pcodeiv+code+'\n'+pcodeo) d1 = '{"code":['+odsopen+','+jcode+','+odsclose+']}' headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('POST', self._uri_exe, body=d1, headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() try: jobid = json.loads(resp.decode(self.sascfg.encoding)) except: raise SASHTTPsubmissionError(msg="Problem parsing response from Compute Service.\n Status="+str(status)+"\n Response="+str(resp)) if not jobid or status > 299: raise SASHTTPsubmissionError(msg="Problem submitting job to Compute Service.\n Status code="+str(jobid.get('httpStatusCode'))+"\n Message="+jobid.get('message')) for ld in jobid.get('links'): if ld.get('method') == 'GET' and ld.get('rel') == 'state': uri = ld.get('uri') break conn = self.sascfg.HTTPConn; headers = {"Accept":"text/plain", "Authorization":"Bearer "+self.sascfg._token} done = False delay = kwargs.get('GETstatusDelay' , 0.5) excpcnt = kwargs.get('GETstatusFailcnt', 5) while not done: try: while True: # GET Status for JOB conn.connect() conn.request('GET', uri, headers=headers) req = conn.getresponse() resp = req.read() conn.close() if resp not in [b'running', b'pending']: done = True break sleep(delay) except (KeyboardInterrupt, SystemExit): conn.close() print('Exception caught!') response = self.sascfg._prompt( "SAS attention handling not yet supported over HTTP. 
Please enter (Q) to Quit waiting for results or (C) to continue waiting.") while True: if response is None or response.upper() == 'Q': return dict(LOG='', LST='', BC=True) if response.upper() == 'C': break response = self.sascfg._prompt("Please enter (Q) to Quit waiting for results or (C) to continue waiting.") except hc.RemoteDisconnected as Dis: conn.close() print('RemoteDisconnected Exception caught!\n'+str(Dis)) excpcnt -= 1 if excpcnt < 0: raise logd = self._getlog(jobid).replace(chr(12), chr(10)) if ods: lstd = self._getlst(jobid).replace(chr(12), chr(10)) else: lstd = self._getlsttxt(jobid).replace(chr(12), chr(10)) trip = lstd.rpartition("/*]]>*/") if len(trip[1]) > 0 and len(trip[2]) < 200: lstd = '' self._sb._lastlog = logd # issue 294 if printto: conn = self.sascfg.HTTPConn; conn.connect() jcode = json.dumps('proc printto;run;\n') d1 = '{"code":['+jcode+']}' headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('POST', self._uri_exe, body=d1, headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() if logd.count('ERROR:') > 0: warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem") self._sb.check_error_log = True return dict(LOG=logd, LST=lstd) def saslog(self): ''' this method is used to get the current, full contents of the SASLOG ''' return self._log def exist(self, table: str, libref: str ="") -> bool: ''' table - the name of the SAS Data Set libref - the libref for the Data Set, defaults to WORK, or USER if assigned Returns True it the Data Set exists and False if it does not ''' #can't have an empty libref, so check for user or work if not libref: # HEAD Libref USER conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*", "Authorization":"Bearer "+self.sascfg._token} conn.request('HEAD', "/compute/sessions/"+self.pid+"/data/USER", headers=headers) req = conn.getresponse() status = req.status conn.close() if status == 200: libref = 'USER' else: libref = 'WORK' code = 'data _null_; e = exist("' code += libref+"." code += "'"+table.strip()+"'n"+'"'+");\n" code += 'v = exist("' code += libref+"." code += "'"+table.strip()+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n" code += "te='TABLE_EXISTS='; put te e;run;\n" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("TABLE_EXISTS= ") l2 = l2[2].partition("\n") exists = int(l2[0]) return bool(exists) """ # HEAD Data Table conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*", "Authorization":"Bearer "+self.sascfg._token} conn.request('HEAD', "/compute/sessions/"+self.pid+"/data/"+libref+"/"+table, headers=headers) req = conn.getresponse() status = req.status conn.close() if status == 200: exists = True else: exists = False return exists """ def read_csv(self, file: str, table: str, libref: str ="", nosub: bool=False, opts: dict ={}) -> '<SASdata object>': ''' This method will import a csv file into a SAS Data Set and return the SASdata object referring to it. file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file table - the name of the SAS Data Set to create libref - the libref for the SAS Data Set being created. 
Defaults to WORK, or USER if assigned opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows) ''' code = "filename x " if file.lower().startswith("http"): code += "url " code += "\""+file+"\";\n" code += "proc import datafile=x out=" if len(libref): code += libref+"." code += "'"+table.strip()+"'n dbms=csv replace; "+self._sb._impopts(opts)+" run;" if nosub: print(code) else: ll = self.submit(code, "text") def write_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, dsopts: dict ={}, opts: dict ={}) -> 'The LOG showing the results of the step': ''' This method will export a SAS Data Set to a file in CCSV format. file - the OS filesystem path of the file to be created (exported from the SAS Data Set) table - the name of the SAS Data Set you want to export to a CSV file libref - the libref for the SAS Data Set. opts - a dictionary containing any of the following Proc Export options(delimiter, putnames) ''' code = "filename x \""+file+"\";\n" code += "options nosource;\n" code += "proc export data=" if len(libref): code += libref+"." code += "'"+table.strip()+"'n "+self._sb._dsopts(dsopts)+" outfile=x dbms=csv replace; " code += self._sb._expopts(opts)+" run\n;" code += "options source;\n" if nosub: print(code) else: ll = self.submit(code, "text") return ll['LOG'] def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs): """ This method uploads a local file to the SAS servers file system. localfile - path to the local file to upload remotefile - path to remote file to create or overwrite overwrite - overwrite the output file if it exists? permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax """ valid = self._sb.file_info(remotefile, quiet = True) if valid is None: remf = remotefile else: if valid == {}: remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2] else: remf = remotefile if overwrite == False: return {'Success' : False, 'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."} try: fd = open(localfile, 'rb') except OSError as e: return {'Success' : False, 'LOG' : "File "+str(localfile)+" could not be opened. 
Error was: "+str(e)} fsize = os.path.getsize(localfile) if fsize > 0: code = "filename _sp_updn '"+remf+"' recfm=N permission='"+permission+"';" ll = self.submit(code, 'text') logf = ll['LOG'] # GET Etag conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.compute.fileref+json;application/json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', self._uri_files+"/_sp_updn", headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() Etag = req.getheader("Etag") # PUT data conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*","Content-Type":"application/octet-stream", "Transfer-Encoding" : "chunked", "Authorization":"Bearer "+self.sascfg._token} conn.connect() conn.putrequest('PUT', self._uri_files+"/_sp_updn/content") conn.putheader("Accept","*/*") conn.putheader("Content-Type","application/octet-stream") conn.putheader("If-Match",Etag) conn.putheader("Transfer-Encoding","chunked") conn.putheader("Authorization","Bearer "+self.sascfg._token) conn.endheaders() blksz = int(kwargs.get('blocksize', 50000)) while True: buf = fd.read1(blksz) if len(buf) == 0: conn.send(b"0\r\n\r\n") break lenstr = "%s\r\n" % hex(len(buf))[2:] conn.send(lenstr.encode()) conn.send(buf) conn.send(b"\r\n") req = conn.getresponse() status = req.status resp = req.read() conn.close() code = "filename _sp_updn;" else: logf = '' code = """ filename _sp_updn '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""'; data _null_; fid = fopen('_sp_updn', 'O'); if fid then rc = fclose(fid); run; filename _sp_updn; """ ll = self.submit(code, 'text') logf += ll['LOG'] fd.close() return {'Success' : True, 'LOG' : logf} def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs): """ This method downloads a remote file from the SAS servers file system. localfile - path to the local file to create or overwrite remotefile - path to remote file tp dpwnload overwrite - overwrite the output file if it exists? """ valid = self._sb.file_info(remotefile, quiet = True) if valid is None: return {'Success' : False, 'LOG' : "File "+str(remotefile)+" does not exist."} if valid == {}: return {'Success' : False, 'LOG' : "File "+str(remotefile)+" is a directory."} if os.path.isdir(localfile): locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2] else: locf = localfile try: fd = open(locf, 'wb') fd.write(b'write can fail even if open worked, as it turns out') fd.close() fd = open(locf, 'wb') except OSError as e: return {'Success' : False, 'LOG' : "File "+str(locf)+" could not be opened or written to. 
Error was: "+str(e)} code = "filename _sp_updn '"+remotefile+"' recfm=F encoding=binary lrecl=4096;" ll = self.submit(code, "text") logf = ll['LOG'] # GET data conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*","Content-Type":"application/octet-stream", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers) req = conn.getresponse() status = req.status fd.write(req.read()) fd.flush() fd.close() conn.close() ll = self.submit("filename _sp_updn;", 'text') logf += ll['LOG'] return {'Success' : True, 'LOG' : logf} def _getbytelenF(self, x): return len(x.encode(self.sascfg.encoding)) def _getbytelenR(self, x): return len(x.encode(self.sascfg.encoding, errors='replace')) def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a', libref: str ="", keep_outer_quotes: bool=False, embedded_newlines: bool=True, LF: str = '\x01', CR: str = '\x02', colsep: str = '\x03', colrep: str = ' ', datetimes: dict={}, outfmts: dict={}, labels: dict={}, outdsopts: dict={}, encode_errors = None, char_lengths = None, **kwargs): ''' This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set. df - Pandas Data Frame to import to a SAS Data Set table - the name of the SAS Data Set to create libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off. embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\x01' CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\x02' colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03' datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes outfmts - dict with column names and SAS formats to assign to the new SAS data set labels - dict with column names and SAS Labels to assign to the new SAS data set outdsopts - a dictionary containing output data set options for the table being created encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char \ 'ignore' will not transcode n Python, so you get whatever happens with your data and SAS char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set ''' input = "" xlate = "" card = "" format = "" length = "" label = "" dts = [] ncols = len(df.columns) lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x" cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x " delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x " dts_upper = {k.upper():v for k,v in datetimes.items()} dts_keys = dts_upper.keys() fmt_upper = {k.upper():v for k,v in outfmts.items()} fmt_keys = fmt_upper.keys() lab_upper = {k.upper():v for k,v in labels.items()} lab_keys = lab_upper.keys() if encode_errors is None: encode_errors = 'fail' bpc = self._sb.pyenc[0] if char_lengths and str(char_lengths).strip() in ['1','2','3','4']: bpc = int(char_lengths) if char_lengths and str(char_lengths) == 'exact': CnotB = False else: CnotB = bpc == 1 if type(char_lengths) is not dict or len(char_lengths) < ncols: charlens = self._sb.df_char_lengths(df, 
encode_errors, char_lengths) else: charlens = char_lengths if charlens is None: return -1 chr_upper = {k.upper():v for k,v in charlens.items()} if type(df.index) != pd.RangeIndex: warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred") for name in df.columns: colname = str(name) col_up = colname.upper() input += "'"+colname+"'n " if col_up in lab_keys: label += "label '"+colname+"'n ="+lab_upper[col_up]+";\n" if col_up in fmt_keys: format += "'"+colname+"'n "+fmt_upper[col_up]+" " if df.dtypes[name].kind in ('O','S','U','V'): try: length += " '"+colname+"'n $"+str(chr_upper[col_up]) except KeyError as e: print("Dictionary provided as char_lengths is missing column: "+colname) raise e if keep_outer_quotes: input += "~ " dts.append('C') if embedded_newlines: xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n" xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n" else: if df.dtypes[name].kind in ('M'): length += " '"+colname+"'n 8" input += ":B8601DT26.6 " if col_up not in dts_keys: if col_up not in fmt_keys: format += "'"+colname+"'n E8601DT26.6 " else: if dts_upper[col_up].lower() == 'date': if col_up not in fmt_keys: format += "'"+colname+"'n E8601DA. " xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n" else: if dts_upper[col_up].lower() == 'time': if col_up not in fmt_keys: format += "'"+colname+"'n E8601TM. " xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n" else: print("invalid value for datetimes for column "+colname+". Using default.") if col_up not in fmt_keys: format += "'"+colname+"'n E8601DT26.6 " dts.append('D') else: length += " '"+colname+"'n 8" if df.dtypes[name] == 'bool': dts.append('B') else: dts.append('N') code = "data " if len(libref): code += libref+"." code += "'"+table.strip()+"'n" if len(outdsopts): code += '(' for key in outdsopts: code += key+'='+str(outdsopts[key]) + ' ' code += ");\n" else: code += ";\n" if len(length): code += "length "+length+";\n" if len(format): code += "format "+format+";\n" code += label code += "infile datalines delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\ninput "+input+";\n"+xlate+";\ndatalines4;" self._asubmit(code, "text") blksz = int(kwargs.get('blocksize', 1000000)) noencode = self._sb.sascei == 'utf-8' or encode_errors == 'ignore' row_num = 0 code = "" for row in df.itertuples(index=False): row_num += 1 card = "" for col in range(ncols): var = str(row[col]) if dts[col] == 'N' and var == 'nan': var = '.' elif dts[col] == 'C': if var == 'nan' or len(var) == 0: var = ' ' else: var = var.replace(colsep, colrep) elif dts[col] == 'B': var = str(int(row[col])) elif dts[col] == 'D': if var in ['nan', 'NaT', 'NaN']: var = '.' else: var = str(row[col].to_datetime64())[:26] card += var if col < (ncols-1): card += colsep if embedded_newlines: card = card.replace(LF, colrep).replace(CR, colrep) card = card.replace('\n', LF).replace('\r', CR) code += card+"\n" if len(code) > blksz: if not noencode: if encode_errors == 'fail': if CnotB: try: chk = code.encode(self.sascfg.encoding) except Exception as e: self._asubmit(";;;;\n;;;;", "text") ll = self.submit("run;", 'text') print("Transcoding error encountered. 
Data transfer stopped on or before row "+str(row_num)) print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e)) return row_num else: code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding) self._asubmit(code, "text") code = "" if not noencode and len(code) > 0: if encode_errors == 'fail': if CnotB: try: code = code.encode(self.sascfg.encoding).decode(self.sascfg.encoding) except Exception as e: self._asubmit(";;;;\n;;;;", "text") ll = self.submit("run;", 'text') print("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num)) print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e)) return row_num else: code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding) self._asubmit(code+";;;;\n;;;;", "text") ll = self.submit("quit;", 'text') return None def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None, rowsep: str = '\x01', colsep: str = '\x02', rowrep: str = ' ', colrep: str = ' ', **kwargs) -> '<Pandas Data Frame object>': ''' This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. table - the name of the SAS Data Set you want to export to a Pandas Data Frame libref - the libref for the SAS Data Set. dsopts - data set options for the input SAS Data Set rowsep - the row seperator character to use; defaults to '\x01' colsep - the column seperator character to use; defaults to '\x02' rowrep - the char to convert to for any embedded rowsep chars, defaults to ' ' colrep - the char to convert to for any embedded colsep chars, defaults to ' ' ''' dsopts = dsopts if dsopts is not None else {} method = kwargs.pop('method', None) if method and method.lower() == 'csv': return self.sasdata2dataframeCSV(table, libref, dsopts, **kwargs) #elif method and method.lower() == 'disk': else: return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep, rowrep, colrep, **kwargs) def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict =None, opts: dict = None, **kwargs) -> '<Pandas Data Frame object>': ''' This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. table - the name of the SAS Data Set you want to export to a Pandas Data Frame libref - the libref for the SAS Data Set. dsopts - data set options for the input SAS Data Set opts - a dictionary containing any of the following Proc Export options(delimiter, putnames) tempfile - DEPRECATED tempkeep - DEPRECATED These two options are for advanced usage. They override how saspy imports data. 
For more info see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts= ''' tmp = kwargs.pop('tempfile', None) tmp = kwargs.pop('tempkeep', None) dsopts = dsopts if dsopts is not None else {} opts = opts if opts is not None else {} if libref: tabname = libref+".'"+table.strip()+"'n " else: tabname = "'"+table.strip()+"'n " code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n" ll = self.submit(code, "text") ##GET Data Table Info #conn = self.sascfg.HTTPConn; conn.connect() #headers={"Accept":"application/vnd.sas.compute.data.table+json", "Authorization":"Bearer "+self.sascfg._token} #conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe", headers=headers) #req = conn.getresponse() #status = req.status #conn.close() #resp = req.read() #js = json.loads(resp.decode(self.sascfg.encoding)) conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe/columns?start=0&limit=9999999", headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() js = json.loads(resp.decode(self.sascfg.encoding)) varlist = [] vartype = [] nvars = js.get('count') lst = js.get('items') for i in range(len(lst)): varlist.append(lst[i].get('name')) vartype.append(lst[i].get('type')) topts = dict(dsopts) topts.pop('firstobs', None) topts.pop('obs', None) code = "data work._n_u_l_l_;output;run;\n" code += "data _null_; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n" for i in range(nvars): code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n" code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("FMT_CATS=") l2 = l2[2].partition("\n") varcat = l2[2].split("\n", nvars) del varcat[nvars] code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n" code += "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat " idx_col = kwargs.pop('index_col', False) eng = kwargs.pop('engine', 'c') my_fmts = kwargs.pop('my_fmts', False) k_dts = kwargs.pop('dtype', None) if k_dts is None and my_fmts: print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.") my_fmts = False if not my_fmts: for i in range(nvars): if vartype[i] == 'FLOAT': code += "'"+varlist[i]+"'n " if varcat[i] in self._sb.sas_date_fmts: code += 'E8601DA10. ' else: if varcat[i] in self._sb.sas_time_fmts: code += 'E8601TM15.6 ' else: if varcat[i] in self._sb.sas_datetime_fmts: code += 'E8601DT26.6 ' else: code += 'best32. 
' code += ";run;\n" ll = self.submit(code, "text") if k_dts is None: dts = {} for i in range(nvars): if vartype[i] == 'FLOAT': if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: dts[varlist[i]] = 'float' else: dts[varlist[i]] = 'str' else: dts[varlist[i]] = 'str' else: dts = k_dts code = "filename _tomodsx '"+self._sb.workpath+"_tomodsx' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n" code += "proc export data=work.sasdata2dataframe outfile=_tomodsx dbms=csv replace;\n" code += self._sb._expopts(opts)+" run;\n" code += "proc delete data=work.sasdata2dataframe(memtype=view);run;\n" ll = self.submit(code, 'text') logf = ll['LOG'] code = "filename _sp_updn '"+self._sb.workpath+"_tomodsx' recfm=F encoding=binary lrecl=4096;" ll = self.submit(code, "text") logf += ll['LOG'] # GET data conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*","Content-Type":"application/octet-stream", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers) req = conn.getresponse() status = req.status sockout = _read_sock(req=req) df = pd.read_csv(sockout, index_col=idx_col, encoding='utf8', engine=eng, dtype=dts, **kwargs) conn.close() if k_dts is None: # don't override these if user provided their own dtypes for i in range(nvars): if vartype[i] == 'FLOAT': if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce') ll = self.submit("filename _sp_updn;", 'text') logf += ll['LOG'] return df def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None, rowsep: str = '\x01', colsep: str = '\x02', rowrep: str = ' ', colrep: str = ' ', **kwargs) -> '<Pandas Data Frame object>': ''' This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. table - the name of the SAS Data Set you want to export to a Pandas Data Frame libref - the libref for the SAS Data Set. dsopts - data set options for the input SAS Data Set rowsep - the row seperator character to use; defaults to '\x01' colsep - the column seperator character to use; defaults to '\x02' rowrep - the char to convert to for any embedded rowsep chars, defaults to ' ' colrep - the char to convert to for any embedded colsep chars, defaults to ' ' tempfile - DEPRECATED tempkeep - DEPRECATED These two options are for advanced usage. They override how saspy imports data. 
For more info see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts= ''' tmp = kwargs.pop('tempfile', None) tmp = kwargs.pop('tempkeep', None) dsopts = dsopts if dsopts is not None else {} if libref: tabname = libref+".'"+table.strip()+"'n " else: tabname = "'"+table.strip()+"'n " code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n" ll = self.submit(code, "text") ##GET Data Table Info #conn = self.sascfg.HTTPConn; conn.connect() #headers={"Accept":"application/vnd.sas.compute.data.table+json", "Authorization":"Bearer "+self.sascfg._token} #conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe", headers=headers) #req = conn.getresponse() #status = req.status #conn.close() #resp = req.read() #js = json.loads(resp.decode(self.sascfg.encoding)) conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe/columns?start=0&limit=9999999", headers=headers) req = conn.getresponse() status = req.status resp = req.read() conn.close() js = json.loads(resp.decode(self.sascfg.encoding)) varlist = [] vartype = [] nvars = js.get('count') lst = js.get('items') for i in range(len(lst)): varlist.append(lst[i].get('name')) vartype.append(lst[i].get('type')) topts = dict(dsopts) topts.pop('firstobs', None) topts.pop('obs', None) code = "proc delete data=work.sasdata2dataframe(memtype=view);run;" code += "data work._n_u_l_l_;output;run;\n" code += "data _null_; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n" for i in range(nvars): code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n" code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("FMT_CATS=") l2 = l2[2].partition("\n") varcat = l2[2].split("\n", nvars) del varcat[nvars] rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x" cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x " idx_col = kwargs.pop('index_col', False) eng = kwargs.pop('engine', 'c') my_fmts = kwargs.pop('my_fmts', False) k_dts = kwargs.pop('dtype', None) if k_dts is None and my_fmts: print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.") my_fmts = False code = "filename _tomodsx '"+self._sb.workpath+"_tomodsx' recfm=v termstr=NL encoding='utf-8';\n" code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n" if not my_fmts: for i in range(nvars): if vartype[i] == 'FLOAT': code += "format '"+varlist[i]+"'n " if varcat[i] in self._sb.sas_date_fmts: code += 'E8601DA10.' else: if varcat[i] in self._sb.sas_time_fmts: code += 'E8601TM15.6' else: if varcat[i] in self._sb.sas_datetime_fmts: code += 'E8601DT26.6' else: code += 'best32.' 
code += '; ' if i % 10 == 0: code +='\n' code += "\nfile _tomodsx lrecl="+str(self.sascfg.lrecl)+" dlm="+cdelim+" recfm=v termstr=NL encoding='utf-8';\n" for i in range(nvars): if vartype[i] != 'FLOAT': code += "'"+varlist[i]+"'n = translate('" code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \ '%02x%02x' % \ (ord(rowrep.encode(self.sascfg.encoding)), \ ord(colrep.encode(self.sascfg.encoding))), '%02x%02x' % \ (ord(rowsep.encode(self.sascfg.encoding)), \ ord(colsep.encode(self.sascfg.encoding)))) if i % 10 == 0: code +='\n' code += "\nput " for i in range(nvars): code += " '"+varlist[i]+"'n " if i % 10 == 0: code +='\n' code += rdelim+";\nrun;" ll = self.submit(code, "text") if k_dts is None: dts = {} for i in range(nvars): if vartype[i] == 'FLOAT': if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: dts[varlist[i]] = 'float' else: dts[varlist[i]] = 'str' else: dts[varlist[i]] = 'str' else: dts = k_dts miss = ['.', ' '] quoting = kwargs.pop('quoting', 3) code = "filename _sp_updn '"+self._sb.workpath+"_tomodsx' recfm=F encoding=binary lrecl=4096;" ll = self.submit(code, "text") logf = ll['LOG'] # GET data conn = self.sascfg.HTTPConn; conn.connect() headers={"Accept":"*/*","Content-Type":"application/octet-stream", "Authorization":"Bearer "+self.sascfg._token} conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers) req = conn.getresponse() status = req.status sockout = _read_sock(req=req, method='DISK', rsep=(colsep+rowsep+'\n').encode(), rowsep=rowsep.encode()) df = pd.read_csv(sockout, index_col=idx_col, engine=eng, header=None, names=varlist, sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss, encoding='utf-8', quoting=quoting, **kwargs) conn.close() if k_dts is None: # don't override these if user provided their own dtypes for i in range(nvars): if vartype[i] == 'FLOAT': if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce') ll = self.submit("filename _sp_updn;", 'text') logf += ll['LOG'] return df class _read_sock(io.StringIO): def __init__(self, **kwargs): self.req = kwargs.get('req') self.method = kwargs.get('method', 'CSV') self.rowsep = kwargs.get('rowsep', b'\n') self.rsep = kwargs.get('rsep', self.rowsep) self.datar = b"" def read(self, size=4096): datl = 0 size = max(size, 4096) notarow = True while datl < size or notarow: data = self.req.read(size) dl = len(data) if dl: datl += dl self.datar += data if notarow: notarow = self.datar.count(self.rsep) <= 0 else: if len(self.datar) <= 0: return '' else: break data = self.datar.rpartition(self.rsep) if self.method == 'DISK': datap = (data[0]+data[1]).replace(self.rsep, self.rowsep) else: datap = data[0]+data[1] self.datar = data[2] return datap.decode()
apache-2.0
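For orientation, here is a minimal usage sketch of the two transfer paths implemented above, reached through saspy's public df2sd/sd2df wrappers (which delegate to dataframe2sasdata and sasdata2dataframe on the active access method). The configuration name 'httpsviya', the table name and the label text are placeholders, not values taken from this file.

import pandas as pd
import saspy

# 'httpsviya' is an assumed entry in sascfg_personal.py for the HTTP access method
sas = saspy.SASsession(cfgname='httpsviya')

df = pd.DataFrame({'x': [1.0, 2.0, 3.0],
                   'label': ['a', 'b', 'c']})

# DataFrame -> SAS data set WORK.DEMO, attaching a SAS label to one column
sd = sas.df2sd(df, table='demo', libref='work',
               labels={'x': 'measured value'})

# SAS data set -> DataFrame; method='CSV' routes through sasdata2dataframeCSV,
# the default routes through sasdata2dataframeDISK
df_back = sas.sd2df(table='demo', libref='work', method='CSV')

print(df_back.dtypes)
sas.endsas()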
ChanderG/scikit-learn
examples/linear_model/plot_ols.py
220
1940
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.

The coefficients, the residual sum of squares and the variance score are also
calculated.

"""
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause


import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()


# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
bsd-3-clause
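As a cross-check on the example above, the same single-feature fit can be recovered from the closed-form least-squares solution that LinearRegression minimizes. The sketch below assumes the same diabetes data split and only illustrates the objective; it is not part of the example file.

import numpy as np
from sklearn import datasets

diabetes = datasets.load_diabetes()
x = diabetes.data[:-20, 2]          # same single feature, training rows only
y = diabetes.target[:-20]

# Design matrix with an intercept column; solve min ||A w - y||^2
A = np.column_stack([x, np.ones_like(x)])
slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
print("slope %.2f, intercept %.2f" % (slope, intercept))  # slope matches regr.coef_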
jpautom/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <[email protected]> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
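A tiny illustration of the consensus_score metric used in the demo above, with hand-built bicluster indicator tuples of the same (rows, columns) form as model.biclusters_. The arrays are invented purely for illustration.

import numpy as np
from sklearn.metrics import consensus_score

# Two biclusters over a 4x3 matrix: boolean membership per row and per column
rows = np.array([[True, True, False, False],
                 [False, False, True, True]])
cols = np.array([[True, False, False],
                 [False, True, True]])

# Identical bicluster sets are matched one-to-one and score 1.0
print(consensus_score((rows, cols), (rows, cols)))        # 1.0
# Pairing each row group with the wrong column group gives no overlap
print(consensus_score((rows, cols), (rows, cols[::-1])))  # 0.0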
murali-munna/scikit-learn
sklearn/tests/test_naive_bayes.py
142
17496
import pickle from io import BytesIO import numpy as np import scipy.sparse from sklearn.datasets import load_digits, load_iris from sklearn.cross_validation import cross_val_score, train_test_split from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB # Data is just 6 separable points in the plane X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) y = np.array([1, 1, 1, 2, 2, 2]) # A bit more random tests rng = np.random.RandomState(0) X1 = rng.normal(size=(10, 3)) y1 = (rng.normal(size=(10)) > 0).astype(np.int) # Data is 6 random integer points in a 100 dimensional space classified to # three classes. X2 = rng.randint(5, size=(6, 100)) y2 = np.array([1, 1, 2, 2, 3, 3]) def test_gnb(): # Gaussian Naive Bayes classification. # This checks that GaussianNB implements fit and predict and returns # correct values for a simple toy dataset. clf = GaussianNB() y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) # Test whether label mismatch between target y and classes raises # an Error # FIXME Remove this test once the more general partial_fit tests are merged assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1]) def test_gnb_prior(): # Test whether class priors are properly set. clf = GaussianNB().fit(X, y) assert_array_almost_equal(np.array([3, 3]) / 6.0, clf.class_prior_, 8) clf.fit(X1, y1) # Check that the class priors sum to 1 assert_array_almost_equal(clf.class_prior_.sum(), 1) def test_gnb_sample_weight(): """Test whether sample weights are properly used in GNB. """ # Sample weights all being 1 should not change results sw = np.ones(6) clf = GaussianNB().fit(X, y) clf_sw = GaussianNB().fit(X, y, sw) assert_array_almost_equal(clf.theta_, clf_sw.theta_) assert_array_almost_equal(clf.sigma_, clf_sw.sigma_) # Fitting twice with half sample-weights should result # in same result as fitting once with full weights sw = rng.rand(y.shape[0]) clf1 = GaussianNB().fit(X, y, sample_weight=sw) clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2) clf2.partial_fit(X, y, sample_weight=sw / 2) assert_array_almost_equal(clf1.theta_, clf2.theta_) assert_array_almost_equal(clf1.sigma_, clf2.sigma_) # Check that duplicate entries and correspondingly increased sample # weights yield the same result ind = rng.randint(0, X.shape[0], 20) sample_weight = np.bincount(ind, minlength=X.shape[0]) clf_dupl = GaussianNB().fit(X[ind], y[ind]) clf_sw = GaussianNB().fit(X, y, sample_weight) assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_) assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_) def test_discrete_prior(): # Test whether class priors are properly set. for cls in [BernoulliNB, MultinomialNB]: clf = cls().fit(X2, y2) assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0), clf.class_log_prior_, 8) def test_mnnb(): # Test Multinomial Naive Bayes classification. # This checks that MultinomialNB implements fit and predict and returns # correct values for a simple toy dataset. 
for X in [X2, scipy.sparse.csr_matrix(X2)]: # Check the ability to predict the learning set. clf = MultinomialNB() assert_raises(ValueError, clf.fit, -X, y2) y_pred = clf.fit(X, y2).predict(X) assert_array_equal(y_pred, y2) # Verify that np.log(clf.predict_proba(X)) gives the same results as # clf.predict_log_proba(X) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) # Check that incremental fitting yields the same results clf2 = MultinomialNB() clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2)) clf2.partial_fit(X[2:5], y2[2:5]) clf2.partial_fit(X[5:], y2[5:]) y_pred2 = clf2.predict(X) assert_array_equal(y_pred2, y2) y_pred_proba2 = clf2.predict_proba(X) y_pred_log_proba2 = clf2.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8) assert_array_almost_equal(y_pred_proba2, y_pred_proba) assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba) # Partial fit on the whole data at once should be the same as fit too clf3 = MultinomialNB() clf3.partial_fit(X, y2, classes=np.unique(y2)) y_pred3 = clf3.predict(X) assert_array_equal(y_pred3, y2) y_pred_proba3 = clf3.predict_proba(X) y_pred_log_proba3 = clf3.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8) assert_array_almost_equal(y_pred_proba3, y_pred_proba) assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba) def check_partial_fit(cls): clf1 = cls() clf1.fit([[0, 1], [1, 0]], [0, 1]) clf2 = cls() clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1]) assert_array_equal(clf1.class_count_, clf2.class_count_) assert_array_equal(clf1.feature_count_, clf2.feature_count_) clf3 = cls() clf3.partial_fit([[0, 1]], [0], classes=[0, 1]) clf3.partial_fit([[1, 0]], [1]) assert_array_equal(clf1.class_count_, clf3.class_count_) assert_array_equal(clf1.feature_count_, clf3.feature_count_) def test_discretenb_partial_fit(): for cls in [MultinomialNB, BernoulliNB]: yield check_partial_fit, cls def test_gnb_partial_fit(): clf = GaussianNB().fit(X, y) clf_pf = GaussianNB().partial_fit(X, y, np.unique(y)) assert_array_almost_equal(clf.theta_, clf_pf.theta_) assert_array_almost_equal(clf.sigma_, clf_pf.sigma_) assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_) clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y)) clf_pf2.partial_fit(X[1::2], y[1::2]) assert_array_almost_equal(clf.theta_, clf_pf2.theta_) assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_) assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_) def test_discretenb_pickle(): # Test picklability of discrete naive Bayes classifiers for cls in [BernoulliNB, MultinomialNB, GaussianNB]: clf = cls().fit(X2, y2) y_pred = clf.predict(X2) store = BytesIO() pickle.dump(clf, store) clf = pickle.load(BytesIO(store.getvalue())) assert_array_equal(y_pred, clf.predict(X2)) if cls is not GaussianNB: # TODO re-enable me when partial_fit is implemented for GaussianNB # Test pickling of estimator trained with partial_fit clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2)) clf2.partial_fit(X2[3:], y2[3:]) store = BytesIO() pickle.dump(clf2, store) clf2 = pickle.load(BytesIO(store.getvalue())) assert_array_equal(y_pred, clf2.predict(X2)) def test_input_check_fit(): # Test input checks for the fit method for cls in [BernoulliNB, MultinomialNB, GaussianNB]: # check shape consistency for number of samples at fit time assert_raises(ValueError, cls().fit, X2, y2[:-1]) # check 
shape consistency for number of input features at predict time clf = cls().fit(X2, y2) assert_raises(ValueError, clf.predict, X2[:, :-1]) def test_input_check_partial_fit(): for cls in [BernoulliNB, MultinomialNB]: # check shape consistency assert_raises(ValueError, cls().partial_fit, X2, y2[:-1], classes=np.unique(y2)) # classes is required for first call to partial fit assert_raises(ValueError, cls().partial_fit, X2, y2) # check consistency of consecutive classes values clf = cls() clf.partial_fit(X2, y2, classes=np.unique(y2)) assert_raises(ValueError, clf.partial_fit, X2, y2, classes=np.arange(42)) # check consistency of input shape for partial_fit assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2) # check consistency of input shape for predict assert_raises(ValueError, clf.predict, X2[:, :-1]) def test_discretenb_predict_proba(): # Test discrete NB classes' probability scores # The 100s below distinguish Bernoulli from multinomial. # FIXME: write a test to show this. X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]] X_multinomial = [[0, 1], [1, 3], [4, 0]] # test binary case (1-d output) y = [0, 0, 2] # 2 is regression test for binary case, 02e673 for cls, X in zip([BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]): clf = cls().fit(X, y) assert_equal(clf.predict(X[-1]), 2) assert_equal(clf.predict_proba(X[0]).shape, (1, 2)) assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1), np.array([1., 1.]), 6) # test multiclass case (2-d output, must sum to one) y = [0, 1, 2] for cls, X in zip([BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]): clf = cls().fit(X, y) assert_equal(clf.predict_proba(X[0]).shape, (1, 3)) assert_equal(clf.predict_proba(X[:2]).shape, (2, 3)) assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1) assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1) assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1) assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1) def test_discretenb_uniform_prior(): # Test whether discrete NB classes fit a uniform prior # when fit_prior=False and class_prior=None for cls in [BernoulliNB, MultinomialNB]: clf = cls() clf.set_params(fit_prior=False) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_equal(prior, np.array([.5, .5])) def test_discretenb_provide_prior(): # Test whether discrete NB classes use provided prior for cls in [BernoulliNB, MultinomialNB]: clf = cls(class_prior=[0.5, 0.5]) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_equal(prior, np.array([.5, .5])) # Inconsistent number of classes with prior assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2]) assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1], classes=[0, 1, 1]) def test_discretenb_provide_prior_with_partial_fit(): # Test whether discrete NB classes use provided prior # when using partial_fit iris = load_iris() iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split( iris.data, iris.target, test_size=0.4, random_state=415) for cls in [BernoulliNB, MultinomialNB]: for prior in [None, [0.3, 0.3, 0.4]]: clf_full = cls(class_prior=prior) clf_full.fit(iris.data, iris.target) clf_partial = cls(class_prior=prior) clf_partial.partial_fit(iris_data1, iris_target1, classes=[0, 1, 2]) clf_partial.partial_fit(iris_data2, iris_target2) assert_array_almost_equal(clf_full.class_log_prior_, clf_partial.class_log_prior_) def test_sample_weight_multiclass(): for cls in [BernoulliNB, MultinomialNB]: # check shape consistency for 
number of samples at fit time yield check_sample_weight_multiclass, cls def check_sample_weight_multiclass(cls): X = [ [0, 0, 1], [0, 1, 1], [0, 1, 1], [1, 0, 0], ] y = [0, 0, 1, 2] sample_weight = np.array([1, 1, 2, 2], dtype=np.float) sample_weight /= sample_weight.sum() clf = cls().fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) # Check sample weight using the partial_fit method clf = cls() clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2], sample_weight=sample_weight[:2]) clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3]) clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:]) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) def test_sample_weight_mnb(): clf = MultinomialNB() clf.fit([[1, 2], [1, 2], [1, 0]], [0, 0, 1], sample_weight=[1, 1, 4]) assert_array_equal(clf.predict([1, 0]), [1]) positive_prior = np.exp(clf.intercept_[0]) assert_array_almost_equal([1 - positive_prior, positive_prior], [1 / 3., 2 / 3.]) def test_coef_intercept_shape(): # coef_ and intercept_ should have shapes as in other linear models. # Non-regression test for issue #2127. X = [[1, 0, 0], [1, 1, 1]] y = [1, 2] # binary classification for clf in [MultinomialNB(), BernoulliNB()]: clf.fit(X, y) assert_equal(clf.coef_.shape, (1, 3)) assert_equal(clf.intercept_.shape, (1,)) def test_check_accuracy_on_digits(): # Non regression test to make sure that any further refactoring / optim # of the NB models do not harm the performance on a slightly non-linearly # separable dataset digits = load_digits() X, y = digits.data, digits.target binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8) X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8] # Multinomial NB scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10) assert_greater(scores.mean(), 0.86) scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10) assert_greater(scores.mean(), 0.94) # Bernoulli NB scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10) assert_greater(scores.mean(), 0.83) scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10) assert_greater(scores.mean(), 0.92) # Gaussian NB scores = cross_val_score(GaussianNB(), X, y, cv=10) assert_greater(scores.mean(), 0.77) scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10) assert_greater(scores.mean(), 0.86) def test_feature_log_prob_bnb(): # Test for issue #4268. 
# Tests that the feature log prob value computed by BernoulliNB when # alpha=1.0 is equal to the expression given in Manning, Raghavan, # and Schuetze's "Introduction to Information Retrieval" book: # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]]) Y = np.array([0, 0, 1, 2, 2]) # Fit Bernoulli NB w/ alpha = 1.0 clf = BernoulliNB(alpha=1.0) clf.fit(X, Y) # Manually form the (log) numerator and denominator that # constitute P(feature presence | class) num = np.log(clf.feature_count_ + 1.0) denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T # Check manual estimate matches assert_array_equal(clf.feature_log_prob_, (num - denom)) def test_bnb(): # Tests that BernoulliNB when alpha=1.0 gives the same values as # those given for the toy example in Manning, Raghavan, and # Schuetze's "Introduction to Information Retrieval" book: # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html # Training data points are: # Chinese Beijing Chinese (class: China) # Chinese Chinese Shanghai (class: China) # Chinese Macao (class: China) # Tokyo Japan Chinese (class: Japan) # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo X = np.array([[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]]) # Classes are China (0), Japan (1) Y = np.array([0, 0, 0, 1]) # Fit BernoulliBN w/ alpha = 1.0 clf = BernoulliNB(alpha=1.0) clf.fit(X, Y) # Check the class prior is correct class_prior = np.array([0.75, 0.25]) assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior) # Check the feature probabilities are correct feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2], [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]]) assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob) # Testing data point is: # Chinese Chinese Chinese Tokyo Japan X_test = np.array([0, 1, 1, 0, 0, 1]) # Check the predictive probabilities are correct unnorm_predict_proba = np.array([[0.005183999999999999, 0.02194787379972565]]) predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba) assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
bsd-3-clause
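To make the constants in test_bnb above easier to follow, here is a hand-computed sketch of the same IR-book Bernoulli example. It reproduces the unnormalized values 0.005184 and 0.02194787... used in the test and is not part of the test module itself.

import numpy as np

prior = np.array([0.75, 0.25])     # P(China), P(Japan) from 3 + 1 training docs
# P(feature present | class) with alpha=1 smoothing;
# features: Beijing, Chinese, Japan, Macao, Shanghai, Tokyo
p = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
              [1/3., 2/3., 2/3., 1/3., 1/3., 2/3.]])

x = np.array([0, 1, 1, 0, 0, 1])   # test doc "Chinese Chinese Chinese Tokyo Japan"
# Bernoulli NB: present features contribute p, absent features contribute (1 - p)
unnorm = prior * np.prod(np.where(x == 1, p, 1 - p), axis=1)
print(unnorm)                      # [0.005184    0.02194787...]
print(unnorm / unnorm.sum())       # matches clf.predict_proba(X_test) in the test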
imrehg/labhardware
projects/beamprofile/beamprofile.py
2
2353
from __future__ import division

import pydc1394 as fw
from time import sleep, time
import numpy as np
import pylab as pl
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FixedLocator, FormatStrFormatter
import matplotlib

import fastfit
import interface


def sizetext(sx, sy):
    """ Check whether the two beam sizes are similar or different.

    Returns displayable text
    """
    if abs(sx - sy)/(sx+sy) < 0.05:
        csign = '~'
    elif (sx > sy):
        csign = '>'
    else:
        csign = '<'
    ctext = "wx | wy\n%.1f %s %.1f" %(sx, csign, sy)
    return ctext

if __name__ == "__main__":

    l = fw.DC1394Library()
    cams = l.enumerate_cameras()
    channel = int(raw_input("Which camera do you want to see (0/1)? "))
    cam0 = fw.Camera(l, cams[channel]['guid'], isospeed=800)
    print "Connected to: %s / %s" %(cam0.vendor, cam0.model)

    # Settings
    cam0.framerate.mode = 'manual'
    cam0.framerate.val = 25
    cam0.exposure.mode = 'manual'
    cam0.exposure.val = cam0.exposure.range[0]
    cam0.shutter.mode = 'manual'
    cam0.shutter.val = cam0.shutter.range[0]

    print "\nFeatures\n", "="*30
    for feat in cam0.features:
        try:
            val = cam0.__getattribute__(feat).val
        except:
            val = '??'
        try:
            mode = cam0.__getattribute__(feat).mode
        except:
            mode = '??'
        print "%s : %s (mode: %s)" %(feat, val, mode)

    print "Camera modes:", cam0.modes
    cam0.mode = "640x480_Y8"  # the Y16 mode does not seem to work
    print "Used camera mode: %s" %(cam0.mode)

    matplotlib.interactive(True)
    fs = 12.5
    fig = pl.figure(num=1, figsize=(fs, fs))
    ax = fig.add_subplot(111)

    cam0.start(interactive=True)

    elements = None
    dimx, dimy = 640, 480
    pixelsize = 5.6
    while True:
        # image collection and display
        try:
            data = np.array(cam0.current_image, dtype='f')
            if elements is None:
                # First display, set up output screen
                elements = interface.createiface(data)
            else:
                # Every other iteration just update data
                interface.updateiface(data, elements)
            pl.draw()
        except KeyboardInterrupt:
            print "Stopping"
            break
        except:
            break
    cam0.stop()
mit
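The loop above only acquires and displays frames; the actual beam fitting lives in the project's fastfit and interface modules, which are not shown here. As a hedged stand-in, the sketch below estimates beam widths wx, wy of the kind sizetext() expects from a single frame via image second moments, using the 5.6 um pixel size declared above. This is an assumption about what such a fit could return, not the project's actual fastfit code.

import numpy as np

def moment_widths(img, pixelsize=5.6):
    # Background-subtract, then compute intensity-weighted second moments
    img = np.asarray(img, dtype=float)
    img = img - img.min()
    total = float(img.sum()) or 1.0
    y, x = np.indices(img.shape)
    cx = (x * img).sum() / total
    cy = (y * img).sum() / total
    sx = np.sqrt(((x - cx) ** 2 * img).sum() / total)
    sy = np.sqrt(((y - cy) ** 2 * img).sum() / total)
    # For a Gaussian profile, the 1/e^2 radius is twice the standard deviation
    return 2 * sx * pixelsize, 2 * sy * pixelsize

frame = np.random.rand(480, 640)      # stand-in for cam0.current_image
wx, wy = moment_widths(frame)
print("wx=%.1f um, wy=%.1f um" % (wx, wy))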
andaag/scikit-learn
sklearn/feature_extraction/text.py
110
50157
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Lars Buitinck <[email protected]> # Robert Layton <[email protected]> # Jochen Wersdörfer <[email protected]> # Roman Sinayev <[email protected]> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ from __future__ import unicode_literals import array from collections import Mapping, defaultdict import numbers from operator import itemgetter import re import unicodedata import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six.moves import xrange from ..preprocessing import normalize from .hashing import FeatureHasher from .stop_words import ENGLISH_STOP_WORDS from ..utils import deprecated from ..utils.fixes import frombuffer_empty, bincount from ..utils.validation import check_is_fitted __all__ = ['CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. See also -------- strip_accents_ascii Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. """ return ''.join([c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. See also -------- strip_accents_unicode Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None else: # assume it's a collection return frozenset(stop) class VectorizerMixin(object): """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. 
""" if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or " "unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens tokens = [] n_original_tokens = len(original_tokens) for n in xrange(min_n, min(max_n + 1, n_original_tokens + 1)): for i in xrange(n_original_tokens - n + 1): tokens.append(" ".join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) ngrams = [] min_n, max_n = self.ngram_range for n in xrange(min_n, min(max_n + 1, text_len + 1)): for i in xrange(text_len - n + 1): ngrams.append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams excluding any whitespace (operating only inside word boundaries)""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in xrange(min_n, max_n + 1): offset = 0 ngrams.append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams.append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" if self.preprocessor is not None: return self.preprocessor # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the cost of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) if self.lowercase: return lambda x: strip_accents(x.lower()) else: return strip_accents def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens""" if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) return lambda doc: token_pattern.findall(doc) def get_stop_words(self): """Build or fetch the effective stop words list""" return _check_stop_list(self.stop_words) def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if self.analyzer == 'char': return lambda doc: self._char_ngrams(preprocess(self.decode(doc))) elif self.analyzer == 'char_wb': return lambda doc: self._char_wb_ngrams( preprocess(self.decode(doc))) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return lambda doc: self._word_ngrams( tokenize(preprocess(self.decode(doc))), stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _validate_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(six.itervalues(vocabulary)) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in xrange(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False def _check_vocabulary(self): """Check if vocabulary is empty or missing (not fit-ed)""" msg = "%(name)s - Vocabulary wasn't fitted." check_is_fitted(self, 'vocabulary_', msg=msg), if len(self.vocabulary_) == 0: raise ValueError("Vocabulary is empty") @property @deprecated("The `fixed_vocabulary` attribute is deprecated and will be " "removed in 0.18. Please use `fixed_vocabulary_` instead.") def fixed_vocabulary(self): return self.fixed_vocabulary_ class HashingVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. 
This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. lowercase : boolean, default=True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. 
The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). n_features : integer, default=(2 ** 20) The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. binary: boolean, default=False. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype: type, optional Type of the matrix returned by fit_transform() or transform(). non_negative : boolean, default=False Whether output matrices should contain non-negative values only; effectively calls abs on the matrix prior to returning it. When True, output values can be interpreted as frequencies. When False, output values will have expected value zero. See also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', non_negative=False, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.non_negative = non_negative self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless.""" # triggers a parameter validation self._get_hasher().fit(X, y=y) return self def transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : (ignored) Returns ------- X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix. """ analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X # Alias transform to fit_transform for convenience fit_transform = transform def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, non_negative=self.non_negative) def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return bincount(X.indices, minlength=X.shape[1]) else: return np.diff(sp.csc_matrix(X, copy=False).indptr) class CountVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.coo_matrix. 
If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. Only applies if ``analyzer == 'word'``. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, True by default Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. 
min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : boolean, default=False If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- HashingVectorizer, TfidfVectorizer Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df of min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for new_val, (term, old_val) in enumerate(sorted_features): map_index[new_val] = old_val vocabulary[term] = new_val return X[:, map_index] def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. 
This does not prune samples with zero features. """ if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = _make_int_array() indptr = _make_int_array() indptr.append(0) for doc in raw_documents: for feature in analyze(doc): try: j_indices.append(vocabulary[feature]) except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") j_indices = frombuffer_empty(j_indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.ones(len(j_indices)) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sum_duplicates() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array, [n_samples, n_features] Document-term matrix. """ # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. 
self._validate_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix, [n_samples, n_features] Document-term matrix. """ if not hasattr(self, 'vocabulary_'): self._validate_vocabulary() self._check_vocabulary() # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array, sparse matrix}, shape = [n_samples, n_features] Returns ------- X_inv : list of arrays, len = n_samples List of arrays of terms. """ self._check_vocabulary() if sp.issparse(X): # We need CSR format for fast row manipulations. X = X.tocsr() else: # We need to convert X to a matrix, so that the indexing # returns 2D objects X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name""" self._check_vocabulary() return [t for t, i in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))] def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(BaseEstimator, TransformerMixin): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf, instead of tf * idf. The effect of this is that terms with zero idf, i.e. that occur in all documents of a training set, will not be entirely ignored. The formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR, as follows: Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True. Idf is "t" when use_idf is given, "n" (none) otherwise. 
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). References ---------- .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74.` .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120.` """ def __init__(self, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights) Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts """ if not sp.issparse(X): X = sp.csc_matrix(X) if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log+1 instead of log makes sure terms with zero idf don't get # suppressed entirely. idf = np.log(float(n_samples) / df) + 1.0 self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features) return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- vectors : sparse matrix, [n_samples, n_features] """ if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float): # preserve float family dtype X = sp.csr_matrix(X, copy=copy) else: # convert counts or binary occurrences to floats X = sp.csr_matrix(X, dtype=np.float64, copy=copy) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: check_is_fitted(self, '_idf_diag', 'idf vector is not fitted') expected_n_features = self._idf_diag.shape[0] if n_features != expected_n_features: raise ValueError("Input has n_features=%d while the model" " has been trained with n_features=%d" % ( n_features, expected_n_features)) # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): if hasattr(self, "_idf_diag"): return np.ravel(self._idf_diag.sum(axis=0)) else: return None class TfidfVectorizer(CountVectorizer): """Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. 
Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char'} or callable Whether the feature should be made of word or character n-grams. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, default True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. 
If not given, a vocabulary is determined from the input documents. binary : boolean, default=False If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs.) dtype : type, optional Type of the matrix returned by fit_transform() or transform(). norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- idf_ : array, shape = [n_features], or None The learned idf vector (global term weights) when ``use_idf`` is set to True, None otherwise. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- CountVectorizer Tokenize the documents and count the occurrences of token and return them as a sparse matrix TfidfTransformer Apply Term Frequency Inverse Document Frequency normalization to a sparse matrix of occurrence counts. Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super(TfidfVectorizer, self).__init__( input=input, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. 
Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- self : TfidfVectorizer """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents, copy=True): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted') X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
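# ---------------------------------------------------------------------------
# Usage sketch (not part of the library source above): a minimal, hedged
# illustration of the equivalence stated in the TfidfVectorizer docstring,
# i.e. that TfidfVectorizer behaves like CountVectorizer followed by
# TfidfTransformer.  The tiny `corpus` below is made up for the example;
# only the public API defined in this module is used.
if __name__ == "__main__":
    import numpy as np
    from sklearn.feature_extraction.text import (CountVectorizer,
                                                 TfidfTransformer,
                                                 TfidfVectorizer)

    corpus = ["the cat sat on the mat",
              "the dog sat on the log",
              "cats and dogs say nothing"]

    # Two-step route: raw term counts, then tf-idf reweighting.
    counts = CountVectorizer().fit_transform(corpus)
    tfidf_two_step = TfidfTransformer().fit_transform(counts)

    # One-step route with the combined vectorizer.
    tfidf_one_step = TfidfVectorizer().fit_transform(corpus)

    # Both routes should give the same weighted document-term matrix.
    assert np.allclose(tfidf_two_step.toarray(), tfidf_one_step.toarray())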
bsd-3-clause
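# ---------------------------------------------------------------------------
# Worked example (added illustration, not part of either repository record):
# the idf weights computed in TfidfTransformer.fit above follow
# idf = log(n_samples / df) + 1, and smooth_idf adds 1 to both counts.
# The numbers below are made up: 3 documents, a term present in 2 of them.
import numpy as np

n_samples, df = 3.0, 2.0
idf_raw = np.log(n_samples / df) + 1.0                      # ~1.405
idf_smooth = np.log((n_samples + 1.0) / (df + 1.0)) + 1.0   # ~1.288
print(idf_raw, idf_smooth)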
natanielruiz/android-yolo
jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py
4
3299
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests of the DataFrame class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.contrib.learn.python import learn from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks def setup_test_df(): """Create a dataframe populated with some test columns.""" df = learn.DataFrame() df["a"] = learn.TransformedSeries( [mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", tf.int32))], mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1") df["b"] = learn.TransformedSeries( [mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", tf.int32))], mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2") df["c"] = learn.TransformedSeries( [mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", tf.int32))], mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1") return df class DataFrameTest(tf.test.TestCase): """Test of `DataFrame`.""" def test_create(self): df = setup_test_df() self.assertEqual(df.columns(), frozenset(["a", "b", "c"])) def test_select_columns(self): df = setup_test_df() df2 = df.select_columns(["a", "c"]) self.assertEqual(df2.columns(), frozenset(["a", "c"])) def test_get_item(self): df = setup_test_df() c1 = df["b"] self.assertEqual(mocks.MockTensor("Mock Tensor 2", tf.int32), c1.build()) def test_set_item_column(self): df = setup_test_df() self.assertEqual(3, len(df)) col1 = mocks.MockSeries("QuackColumn", mocks.MockTensor("Tensor ", tf.int32)) df["quack"] = col1 self.assertEqual(4, len(df)) col2 = df["quack"] self.assertEqual(col1, col2) def test_set_item_column_multi(self): df = setup_test_df() self.assertEqual(3, len(df)) col1 = mocks.MockSeries("QuackColumn", []) col2 = mocks.MockSeries("MooColumn", []) df["quack", "moo"] = [col1, col2] self.assertEqual(5, len(df)) col3 = df["quack"] self.assertEqual(col1, col3) col4 = df["moo"] self.assertEqual(col2, col4) def test_set_item_pandas(self): # TODO(jamieas) pass def test_set_item_numpy(self): # TODO(jamieas) pass def test_build(self): df = setup_test_df() result = df.build() expected = {"a": mocks.MockTensor("Mock Tensor 1", tf.int32), "b": mocks.MockTensor("Mock Tensor 2", tf.int32), "c": mocks.MockTensor("Mock Tensor 1", tf.int32)} self.assertEqual(expected, result) if __name__ == "__main__": tf.test.main()
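# ---------------------------------------------------------------------------
# Added illustration (not part of the test file above): the dict-like
# DataFrame API that these tests exercise, written as a small standalone
# sketch.  It reuses only names already imported above (learn, mocks) and
# the behaviours shown in test_set_item_column and test_select_columns.
def _dataframe_usage_sketch():
  df = learn.DataFrame()
  df["quack"] = mocks.MockSeries("QuackColumn", [])
  df["moo"] = mocks.MockSeries("MooColumn", [])
  assert df.columns() == frozenset(["quack", "moo"])
  # Selecting a subset of columns returns a new DataFrame.
  assert df.select_columns(["quack"]).columns() == frozenset(["quack"])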
apache-2.0
xzh86/scikit-learn
examples/feature_selection/plot_permutation_test_for_classification.py
250
2233
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================

In order to test if a classification score is significant, a technique
consists in repeating the classification procedure after randomizing
(permuting) the labels. The p-value is then given by the percentage of
runs for which the score obtained is greater than the classification
score obtained in the first place.

"""

# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause

print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets


##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size

# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))

# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]

svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)

score, permutation_scores, pvalue = permutation_test_score(
    svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)

print("Classification score %s (pvalue : %s)" % (score, pvalue))

###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
#           color='g', linewidth=3, label='Classification Score'
#           ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
#           color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
         label='Classification Score'
         ' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
bsd-3-clause
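# ---------------------------------------------------------------------------
# Added illustration (not part of either repository record): how the p-value
# reported by permutation_test_score in the example above is derived from
# the permutation scores.  The "+ 1" terms are the usual correction so the
# estimate can never be exactly zero; the numbers themselves are made up.
import numpy as np

score = 0.62
permutation_scores = np.array([0.48, 0.51, 0.55, 0.49, 0.63])
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (len(permutation_scores) + 1.0)
print(pvalue)  # (1 + 1) / (5 + 1) = 0.333...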
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/sklearn/utils/testing.py
6
26970
"""Testing utilities.""" # Copyright (c) 2011, 2012 # Authors: Pietro Berkes, # Andreas Muller # Mathieu Blondel # Olivier Grisel # Arnaud Joly # Denis Engemann # Giorgio Patrini # License: BSD 3 clause import os import inspect import pkgutil import warnings import sys import re import platform import struct import scipy as sp import scipy.io from functools import wraps try: # Python 2 from urllib2 import urlopen from urllib2 import HTTPError except ImportError: # Python 3+ from urllib.request import urlopen from urllib.error import HTTPError import tempfile import shutil import os.path as op import atexit # WindowsError only exist on Windows try: WindowsError except NameError: WindowsError = None import sklearn from sklearn.base import BaseEstimator from sklearn.externals import joblib # Conveniently import all assertions in one place. from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_true from nose.tools import assert_false from nose.tools import assert_raises from nose.tools import raises from nose import SkipTest from nose import with_setup from numpy.testing import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_approx_equal import numpy as np from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.cluster import DBSCAN __all__ = ["assert_equal", "assert_not_equal", "assert_raises", "assert_raises_regexp", "raises", "with_setup", "assert_true", "assert_false", "assert_almost_equal", "assert_array_equal", "assert_array_almost_equal", "assert_array_less", "assert_less", "assert_less_equal", "assert_greater", "assert_greater_equal", "assert_approx_equal"] try: from nose.tools import assert_in, assert_not_in except ImportError: # Nose < 1.0.0 def assert_in(x, container): assert_true(x in container, msg="%r in %r" % (x, container)) def assert_not_in(x, container): assert_false(x in container, msg="%r in %r" % (x, container)) try: from nose.tools import assert_raises_regex except ImportError: # for Python 2 def assert_raises_regex(expected_exception, expected_regexp, callable_obj=None, *args, **kwargs): """Helper function to check for message patterns in exceptions""" not_raised = False try: callable_obj(*args, **kwargs) not_raised = True except expected_exception as e: error_message = str(e) if not re.compile(expected_regexp).search(error_message): raise AssertionError("Error message should match pattern " "%r. %r does not." 
% (expected_regexp, error_message)) if not_raised: raise AssertionError("%s not raised by %s" % (expected_exception.__name__, callable_obj.__name__)) # assert_raises_regexp is deprecated in Python 3.4 in favor of # assert_raises_regex but lets keep the bacward compat in scikit-learn with # the old name for now assert_raises_regexp = assert_raises_regex def _assert_less(a, b, msg=None): message = "%r is not lower than %r" % (a, b) if msg is not None: message += ": " + msg assert a < b, message def _assert_greater(a, b, msg=None): message = "%r is not greater than %r" % (a, b) if msg is not None: message += ": " + msg assert a > b, message def assert_less_equal(a, b, msg=None): message = "%r is not lower than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a <= b, message def assert_greater_equal(a, b, msg=None): message = "%r is not greater than or equal to %r" % (a, b) if msg is not None: message += ": " + msg assert a >= b, message def assert_warns(warning_class, func, *args, **kw): """Test that a certain warning occurs. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func` Returns ------- result : the return value of `func` """ # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = any(warning.category is warning_class for warning in w) if not found: raise AssertionError("%s did not give warning: %s( is %s)" % (func.__name__, warning_class, w)) return result def assert_warns_message(warning_class, message, func, *args, **kw): # very important to avoid uncontrolled state propagation """Test that a certain warning occurs and with a certain message. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. message : str | callable The entire message or a substring to test for. If callable, it takes a string as argument and will trigger an assertion error if it returns `False`. func : callable Calable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func`. Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if hasattr(np, 'VisibleDeprecationWarning'): # Let's not catch the numpy internal DeprecationWarnings warnings.simplefilter('ignore', np.VisibleDeprecationWarning) # Trigger a warning. 
result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = [issubclass(warning.category, warning_class) for warning in w] if not any(found): raise AssertionError("No warning raised for %s with class " "%s" % (func.__name__, warning_class)) message_found = False # Checks the message of all warnings belong to warning_class for index in [i for i, x in enumerate(found) if x]: # substring will match, the entire message with typo won't msg = w[index].message # For Python 3 compatibility msg = str(msg.args[0] if hasattr(msg, 'args') else msg) if callable(message): # add support for certain tests check_in_message = message else: check_in_message = lambda msg: message in msg if check_in_message(msg): message_found = True break if not message_found: raise AssertionError("Did not receive the message you expected " "('%s') for <%s>, got: '%s'" % (message, func.__name__, msg)) return result # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): # XXX: once we may depend on python >= 2.6, this can be replaced by the # warnings module context manager. # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] if len(w) > 0: raise AssertionError("Got warnings when calling %s: %s" % (func.__name__, w)) return result def ignore_warnings(obj=None): """ Context manager and decorator to ignore warnings Note. Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging this is not your tool of choice. Examples -------- >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if callable(obj): return _ignore_warnings(obj) else: return _IgnoreWarnings() def _ignore_warnings(fn): """Decorator to catch and hide warnings without visual nesting""" @wraps(fn) def wrapper(*args, **kwargs): # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') return fn(*args, **kwargs) w[:] = [] return wrapper class _IgnoreWarnings(object): """Improved and simplified Python warnings context manager Copied from Python 2.7.5 and modified as required. """ def __init__(self): """ Parameters ========== category : warning class The category to filter. Defaults to Warning. If None, all categories will be muted. 
""" self._record = True self._module = sys.modules['warnings'] self._entered = False self.log = [] def __repr__(self): args = [] if self._record: args.append("record=True") if self._module is not sys.modules['warnings']: args.append("module=%r" % self._module) name = type(self).__name__ return "%s(%s)" % (name, ", ".join(args)) def __enter__(self): clean_warning_registry() # be safe and not propagate state + chaos warnings.simplefilter('always') if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning if self._record: self.log = [] def showwarning(*args, **kwargs): self.log.append(warnings.WarningMessage(*args, **kwargs)) self._module.showwarning = showwarning return self.log else: return None def __exit__(self, *exc_info): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning self.log[:] = [] clean_warning_registry() # be safe and not propagate state + chaos try: from nose.tools import assert_less except ImportError: assert_less = _assert_less try: from nose.tools import assert_greater except ImportError: assert_greater = _assert_greater def _assert_allclose(actual, desired, rtol=1e-7, atol=0, err_msg='', verbose=True): actual, desired = np.asanyarray(actual), np.asanyarray(desired) if np.allclose(actual, desired, rtol=rtol, atol=atol): return msg = ('Array not equal to tolerance rtol=%g, atol=%g: ' 'actual %s, desired %s') % (rtol, atol, actual, desired) raise AssertionError(msg) if hasattr(np.testing, 'assert_allclose'): assert_allclose = np.testing.assert_allclose else: assert_allclose = _assert_allclose def assert_raise_message(exceptions, message, function, *args, **kwargs): """Helper function to test error messages in exceptions Parameters ---------- exceptions : exception or tuple of exception Name of the estimator func : callable Calable object to raise error *args : the positional arguments to `func`. **kw : the keyword arguments to `func` """ try: function(*args, **kwargs) except exceptions as e: error_message = str(e) if message not in error_message: raise AssertionError("Error message does not include the expected" " string: %r. Observed error message: %r" % (message, error_message)) else: # concatenate exception names if isinstance(exceptions, tuple): names = " or ".join(e.__name__ for e in exceptions) else: names = exceptions.__name__ raise AssertionError("%s not raised by %s" % (names, function.__name__)) def fake_mldata(columns_dict, dataname, matfile, ordering=None): """Create a fake mldata data set. Parameters ---------- columns_dict : dict, keys=str, values=ndarray Contains data as columns_dict[column_name] = array of data. dataname : string Name of data set. matfile : string or file object The file name string or the file-like object of the output file. ordering : list, default None List of column_names, determines the ordering in the data set. Notes ----- This function transposes all arrays, while fetch_mldata only transposes 'data', keep that into account in the tests. 
""" datasets = dict(columns_dict) # transpose all variables for name in datasets: datasets[name] = datasets[name].T if ordering is None: ordering = sorted(list(datasets.keys())) # NOTE: setting up this array is tricky, because of the way Matlab # re-packages 1D arrays datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object') for i, name in enumerate(ordering): datasets['mldata_descr_ordering'][0, i] = name scipy.io.savemat(matfile, datasets, oned_as='column') class mock_mldata_urlopen(object): def __init__(self, mock_datasets): """Object that mocks the urlopen function to fake requests to mldata. `mock_datasets` is a dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see `fake_mldata` for details). When requesting a dataset with a name that is in mock_datasets, this object creates a fake dataset in a StringIO object and returns it. Otherwise, it raises an HTTPError. """ self.mock_datasets = mock_datasets def __call__(self, urlname): dataset_name = urlname.split('/')[-1] if dataset_name in self.mock_datasets: resource_name = '_' + dataset_name from io import BytesIO matfile = BytesIO() dataset = self.mock_datasets[dataset_name] ordering = None if isinstance(dataset, tuple): dataset, ordering = dataset fake_mldata(dataset, resource_name, matfile, ordering) matfile.seek(0) return matfile else: raise HTTPError(urlname, 404, dataset_name + " is not available", [], None) def install_mldata_mock(mock_datasets): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets) def uninstall_mldata_mock(): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = urlopen # Meta estimators need another estimator to be instantiated. META_ESTIMATORS = ["OneVsOneClassifier", "OutputCodeClassifier", "OneVsRestClassifier", "RFE", "RFECV", "BaseEnsemble"] # estimators that there is no way to default-construct sensibly OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV", "SelectFromModel"] # some trange ones DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'TfidfTransformer', 'TfidfVectorizer', 'IsotonicRegression', 'OneHotEncoder', 'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier', 'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures', 'GaussianRandomProjectionHash', 'HashingVectorizer', 'CheckingClassifier', 'PatchExtractor', 'CountVectorizer', # GradientBoosting base estimators, maybe should # exclude them in another way 'ZeroEstimator', 'ScaledLogOddsEstimator', 'QuantileEstimator', 'MeanEstimator', 'LogOddsEstimator', 'PriorProbabilityEstimator', '_SigmoidCalibration', 'VotingClassifier'] def all_estimators(include_meta_estimators=False, include_other=False, type_filter=None, include_dont_test=False): """Get a list of all estimators from sklearn. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. By default meta_estimators such as GridSearchCV are also not included. Parameters ---------- include_meta_estimators : boolean, default=False Whether to include meta-estimators that can be constructed using an estimator as their first argument. 
These are currently BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV. include_other : boolean, default=False Wether to include meta-estimators that are somehow special and can not be default-constructed sensibly. These are currently Pipeline, FeatureUnion and GridSearchCV include_dont_test : boolean, default=False Whether to include "special" label estimator or test processors. type_filter : string, list of string, or None, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actuall type of the class. """ def is_abstract(c): if not(hasattr(c, '__abstractmethods__')): return False if not len(c.__abstractmethods__): return False return True all_classes = [] # get parent folder path = sklearn.__path__ for importer, modname, ispkg in pkgutil.walk_packages( path=path, prefix='sklearn.', onerror=lambda x: None): if ".tests." in modname: continue module = __import__(modname, fromlist="dummy") classes = inspect.getmembers(module, inspect.isclass) all_classes.extend(classes) all_classes = set(all_classes) estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')] # get rid of abstract base classes estimators = [c for c in estimators if not is_abstract(c[1])] if not include_dont_test: estimators = [c for c in estimators if not c[0] in DONT_TEST] if not include_other: estimators = [c for c in estimators if not c[0] in OTHER] # possibly get rid of meta estimators if not include_meta_estimators: estimators = [c for c in estimators if not c[0] in META_ESTIMATORS] if type_filter is not None: if not isinstance(type_filter, list): type_filter = [type_filter] else: type_filter = list(type_filter) # copy filtered_estimators = [] filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin} for name, mixin in filters.items(): if name in type_filter: type_filter.remove(name) filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)]) estimators = filtered_estimators if type_filter: raise ValueError("Parameter type_filter must be 'classifier', " "'regressor', 'transformer', 'cluster' or None, got" " %s." % repr(type_filter)) # drop duplicates, sort for reproducibility return sorted(set(estimators)) def set_random_state(estimator, random_state=0): """Set random state of an estimator if it has the `random_state` param. Classes for whom random_state is deprecated are ignored. Currently DBSCAN is one such class. """ if isinstance(estimator, DBSCAN): return if "random_state" in estimator.get_params(): estimator.set_params(random_state=random_state) def if_matplotlib(func): """Test decorator that skips test if matplotlib not installed. 
""" @wraps(func) def run_test(*args, **kwargs): try: import matplotlib matplotlib.use('Agg', warn=False) # this fails if no $DISPLAY specified import matplotlib.pyplot as plt plt.figure() except ImportError: raise SkipTest('Matplotlib not available.') else: return func(*args, **kwargs) return run_test def skip_if_32bit(func): """Test decorator that skips tests on 32bit platforms.""" @wraps(func) def run_test(*args, **kwargs): bits = 8 * struct.calcsize("P") if bits == 32: raise SkipTest('Test skipped on 32bit platforms.') else: return func(*args, **kwargs) return run_test def if_not_mac_os(versions=('10.7', '10.8', '10.9'), message='Multi-process bug in Mac OS X >= 10.7 ' '(see issue #636)'): """Test decorator that skips test if OS is Mac OS X and its major version is one of ``versions``. """ warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed" " in 0.19: use the safer and more generic" " if_safe_multiprocessing_with_blas instead", DeprecationWarning) mac_version, _, _ = platform.mac_ver() skip = '.'.join(mac_version.split('.')[:2]) in versions def decorator(func): if skip: @wraps(func) def func(*args, **kwargs): raise SkipTest(message) return func return decorator def if_safe_multiprocessing_with_blas(func): """Decorator for tests involving both BLAS calls and multiprocessing Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with some implementation of BLAS (or other libraries that manage an internal posix thread pool) can cause a crash or a freeze of the Python process. In practice all known packaged distributions (from Linux distros or Anaconda) of BLAS under Linux seems to be safe. So we this problem seems to only impact OSX users. This wrapper makes it possible to skip tests that can possibly cause this crash under OS X with. Under Python 3.4+ it is possible to use the `forkserver` start method for multiprocessing to avoid this issue. However it can cause pickling errors on interactively defined functions. It therefore not enabled by default. """ @wraps(func) def run_test(*args, **kwargs): if sys.platform == 'darwin': raise SkipTest( "Possible multi-process bug with some BLAS") return func(*args, **kwargs) return run_test def clean_warning_registry(): """Safe way to reset warnings """ warnings.resetwarnings() reg = "__warningregistry__" for mod_name, mod in list(sys.modules.items()): if 'six.moves' in mod_name: continue if hasattr(mod, reg): getattr(mod, reg).clear() def check_skip_network(): if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)): raise SkipTest("Text tutorial requires large dataset download") def check_skip_travis(): """Skip test if being run on Travis.""" if os.environ.get('TRAVIS') == "true": raise SkipTest("This test needs to be skipped on Travis") def _delete_folder(folder_path, warn=False): """Utility function to cleanup a temporary folder if still existing. 
    Copied from joblib.pool (for independence)."""
    try:
        if os.path.exists(folder_path):
            # This can fail under windows,
            # but will succeed when called by atexit
            shutil.rmtree(folder_path)
    except WindowsError:
        if warn:
            warnings.warn("Could not delete temporary folder %s" % folder_path)


class TempMemmap(object):
    def __init__(self, data, mmap_mode='r'):
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        fpath = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, fpath)
        data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return data_read_only

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)


with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
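# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): how the assertion helpers
# defined here are typically used.  `_divide` is a hypothetical function
# written only for this illustration; the helper signatures are taken from
# the definitions above.
if __name__ == "__main__":
    import warnings

    def _divide(a, b):
        if b == 0:
            raise ValueError("b must be non-zero")
        if b < 0:
            warnings.warn("negative divisor", UserWarning)
        return a / b

    # The expected exception type and message substring must both match.
    assert_raise_message(ValueError, "must be non-zero", _divide, 1, 0)

    # The expected warning category and message substring must both match.
    assert_warns_message(UserWarning, "negative divisor", _divide, 1, -2)

    # Warnings raised inside the context manager are captured and silenced.
    with ignore_warnings():
        _divide(1, -2)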
gpl-2.0
luo66/scikit-learn
sklearn/linear_model/stochastic_gradient.py
65
50308
# Authors: Peter Prettenhofer <[email protected]> (main author) # Mathieu Blondel (partial_fit support) # # License: BSD 3 clause """Classification and regression using Stochastic Gradient Descent (SGD).""" import numpy as np import scipy.sparse as sp from abc import ABCMeta, abstractmethod from ..externals.joblib import Parallel, delayed from .base import LinearClassifierMixin, SparseCoefMixin from .base import make_dataset from ..base import BaseEstimator, RegressorMixin from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import (check_array, check_random_state, check_X_y, deprecated) from ..utils.extmath import safe_sparse_dot from ..utils.multiclass import _check_partial_fit_first_call from ..utils.validation import check_is_fitted from ..externals import six from .sgd_fast import plain_sgd, average_sgd from ..utils.fixes import astype from ..utils import compute_class_weight from .sgd_fast import Hinge from .sgd_fast import SquaredHinge from .sgd_fast import Log from .sgd_fast import ModifiedHuber from .sgd_fast import SquaredLoss from .sgd_fast import Huber from .sgd_fast import EpsilonInsensitive from .sgd_fast import SquaredEpsilonInsensitive LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3, "pa1": 4, "pa2": 5} PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} DEFAULT_EPSILON = 0.1 # Default value of ``epsilon`` parameter. class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)): """Base class for SGD classification and regression.""" def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, warm_start=False, average=False): self.loss = loss self.penalty = penalty self.learning_rate = learning_rate self.epsilon = epsilon self.alpha = alpha self.C = C self.l1_ratio = l1_ratio self.fit_intercept = fit_intercept self.n_iter = n_iter self.shuffle = shuffle self.random_state = random_state self.verbose = verbose self.eta0 = eta0 self.power_t = power_t self.warm_start = warm_start self.average = average self._validate_params() self.coef_ = None if self.average > 0: self.standard_coef_ = None self.average_coef_ = None # iteration count for learning rate schedule # must not be int (e.g. if ``learning_rate=='optimal'``) self.t_ = None def set_params(self, *args, **kwargs): super(BaseSGD, self).set_params(*args, **kwargs) self._validate_params() return self @abstractmethod def fit(self, X, y): """Fit model.""" def _validate_params(self): """Validate input params. """ if not isinstance(self.shuffle, bool): raise ValueError("shuffle must be either True or False") if self.n_iter <= 0: raise ValueError("n_iter must be > zero") if not (0.0 <= self.l1_ratio <= 1.0): raise ValueError("l1_ratio must be in [0, 1]") if self.alpha < 0.0: raise ValueError("alpha must be >= 0") if self.learning_rate in ("constant", "invscaling"): if self.eta0 <= 0.0: raise ValueError("eta0 must be > 0") # raises ValueError if not registered self._get_penalty_type(self.penalty) self._get_learning_rate_type(self.learning_rate) if self.loss not in self.loss_functions: raise ValueError("The loss %s is not supported. " % self.loss) def _get_loss_function(self, loss): """Get concrete ``LossFunction`` object for str ``loss``. 
""" try: loss_ = self.loss_functions[loss] loss_class, args = loss_[0], loss_[1:] if loss in ('huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'): args = (self.epsilon, ) return loss_class(*args) except KeyError: raise ValueError("The loss %s is not supported. " % loss) def _get_learning_rate_type(self, learning_rate): try: return LEARNING_RATE_TYPES[learning_rate] except KeyError: raise ValueError("learning rate %s " "is not supported. " % learning_rate) def _get_penalty_type(self, penalty): penalty = str(penalty).lower() try: return PENALTY_TYPES[penalty] except KeyError: raise ValueError("Penalty %s is not supported. " % penalty) def _validate_sample_weight(self, sample_weight, n_samples): """Set the sample weight array.""" if sample_weight is None: # uniform sample weights sample_weight = np.ones(n_samples, dtype=np.float64, order='C') else: # user-provided array sample_weight = np.asarray(sample_weight, dtype=np.float64, order="C") if sample_weight.shape[0] != n_samples: raise ValueError("Shapes of X and sample_weight do not match.") return sample_weight def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None, intercept_init=None): """Allocate mem for parameters; initialize if provided.""" if n_classes > 2: # allocate coef_ for multi-class if coef_init is not None: coef_init = np.asarray(coef_init, order="C") if coef_init.shape != (n_classes, n_features): raise ValueError("Provided ``coef_`` does not match dataset. ") self.coef_ = coef_init else: self.coef_ = np.zeros((n_classes, n_features), dtype=np.float64, order="C") # allocate intercept_ for multi-class if intercept_init is not None: intercept_init = np.asarray(intercept_init, order="C") if intercept_init.shape != (n_classes, ): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init else: self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C") else: # allocate coef_ for binary problem if coef_init is not None: coef_init = np.asarray(coef_init, dtype=np.float64, order="C") coef_init = coef_init.ravel() if coef_init.shape != (n_features,): raise ValueError("Provided coef_init does not " "match dataset.") self.coef_ = coef_init else: self.coef_ = np.zeros(n_features, dtype=np.float64, order="C") # allocate intercept_ for binary problem if intercept_init is not None: intercept_init = np.asarray(intercept_init, dtype=np.float64) if intercept_init.shape != (1,) and intercept_init.shape != (): raise ValueError("Provided intercept_init " "does not match dataset.") self.intercept_ = intercept_init.reshape(1,) else: self.intercept_ = np.zeros(1, dtype=np.float64, order="C") # initialize average parameters if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = np.zeros(self.coef_.shape, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(self.standard_intercept_.shape, dtype=np.float64, order="C") def _prepare_fit_binary(est, y, i): """Initialization for fit_binary. Returns y, coef, intercept. 
""" y_i = np.ones(y.shape, dtype=np.float64, order="C") y_i[y != est.classes_[i]] = -1.0 average_intercept = 0 average_coef = None if len(est.classes_) == 2: if not est.average: coef = est.coef_.ravel() intercept = est.intercept_[0] else: coef = est.standard_coef_.ravel() intercept = est.standard_intercept_[0] average_coef = est.average_coef_.ravel() average_intercept = est.average_intercept_[0] else: if not est.average: coef = est.coef_[i] intercept = est.intercept_[i] else: coef = est.standard_coef_[i] intercept = est.standard_intercept_[i] average_coef = est.average_coef_[i] average_intercept = est.average_intercept_[i] return y_i, coef, intercept, average_coef, average_intercept def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter, pos_weight, neg_weight, sample_weight): """Fit a single binary classifier. The i'th class is considered the "positive" class. """ # if average is not true, average_coef, and average_intercept will be # unused y_i, coef, intercept, average_coef, average_intercept = \ _prepare_fit_binary(est, y, i) assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] dataset, intercept_decay = make_dataset(X, y_i, sample_weight) penalty_type = est._get_penalty_type(est.penalty) learning_rate_type = est._get_learning_rate_type(learning_rate) # XXX should have random_state_! random_state = check_random_state(est.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if not est.average: return plain_sgd(coef, intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay) else: standard_coef, standard_intercept, average_coef, \ average_intercept = average_sgd(coef, intercept, average_coef, average_intercept, est.loss_function, penalty_type, alpha, C, est.l1_ratio, dataset, n_iter, int(est.fit_intercept), int(est.verbose), int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type, est.eta0, est.power_t, est.t_, intercept_decay, est.average) if len(est.classes_) == 2: est.average_intercept_[0] = average_intercept else: est.average_intercept_[i] = average_intercept return standard_coef, standard_intercept class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD, LinearClassifierMixin)): loss_functions = { "hinge": (Hinge, 1.0), "squared_hinge": (SquaredHinge, 1.0), "perceptron": (Hinge, 0.0), "log": (Log, ), "modified_huber": (ModifiedHuber, ), "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) self.class_weight = class_weight self.classes_ = None self.n_jobs = int(n_jobs) def _partial_fit(self, X, y, alpha, C, loss, 
learning_rate, n_iter, classes, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape self._validate_params() _check_partial_fit_first_call(self, classes) n_classes = self.classes_.shape[0] # Allocate datastructures from input arguments self._expanded_class_weight = compute_class_weight(self.class_weight, self.classes_, y) sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None or coef_init is not None: self._allocate_parameter_mem(n_classes, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." % (n_features, self.coef_.shape[-1])) self.loss_function = self._get_loss_function(loss) if self.t_ is None: self.t_ = 1.0 # delegate to concrete training procedure if n_classes > 2: self._fit_multiclass(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) elif n_classes == 2: self._fit_binary(X, y, alpha=alpha, C=C, learning_rate=learning_rate, sample_weight=sample_weight, n_iter=n_iter) else: raise ValueError("The number of class labels must be " "greater than one.") return self def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if hasattr(self, "classes_"): self.classes_ = None X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C") n_samples, n_features = X.shape # labels can be encoded as float, int, or string literals # np.unique sorts in asc order; largest class id is positive class classes = np.unique(y) if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_coef_ = self.coef_ self.standard_intercept_ = self.intercept_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, classes, sample_weight, coef_init, intercept_init) return self def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, n_iter): """Fit a binary classifier on X and y. """ coef, intercept = fit_binary(self, 1, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight) self.t_ += n_iter * X.shape[0] # need to be 2d if self.average > 0: if self.average <= self.t_ - 1: self.coef_ = self.average_coef_.reshape(1, -1) self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_.reshape(1, -1) self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ else: self.coef_ = coef.reshape(1, -1) # intercept is a float, need to convert it to an array of length 1 self.intercept_ = np.atleast_1d(intercept) def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, n_iter): """Fit a multi-class classifier by combining binary classifiers Each binary classifier predicts one class versus all others. This strategy is called OVA: One Versus All. """ # Use joblib to fit OvA in parallel. 
result = Parallel(n_jobs=self.n_jobs, backend="threading", verbose=self.verbose)( delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate, n_iter, self._expanded_class_weight[i], 1., sample_weight) for i in range(len(self.classes_))) for i, (_, intercept) in enumerate(result): self.intercept_[i] = intercept self.t_ += n_iter * X.shape[0] if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.standard_intercept_ = np.atleast_1d(intercept) self.intercept_ = self.standard_intercept_ def partial_fit(self, X, y, classes=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of the training data y : numpy array, shape (n_samples,) Subset of the target values classes : array, shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ if self.class_weight in ['balanced', 'auto']: raise ValueError("class_weight '{0}' is not supported for " "partial_fit. In order to use 'balanced' weights, " "use compute_class_weight('{0}', classes, y). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. " "Pass the resulting weights as the class_weight " "parameter.".format(self.class_weight)) return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : array, shape (n_classes,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the contructor) if class_weight is specified Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin): """Linear classifiers (SVM, logistic regression, a.o.) with SGD training. This estimator implements regularized linear models with stochastic gradient descent (SGD) learning: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). SGD allows minibatch (online/out-of-core) learning, see the partial_fit method. 
For best results using the default learning rate schedule, the data should have zero mean and unit variance. This implementation works with data represented as dense or sparse arrays of floating point values for the features. The model it fits can be controlled with the loss parameter; by default, it fits a linear support vector machine (SVM). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\ 'perceptron', or a regression loss: 'squared_loss', 'huber',\ 'epsilon_insensitive', or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'hinge', which gives a linear SVM. The 'log' loss gives logistic regression, a probabilistic classifier. 'modified_huber' is another smooth loss that brings tolerance to outliers as well as probability estimates. 'squared_hinge' is like hinge but is quadratically penalized. 'perceptron' is the linear loss used by the perceptron algorithm. The other losses are designed for regression but can be useful in classification as well; see SGDRegressor for a description. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. learning_rate : string, optional The learning rate schedule: constant: eta = eta0 optimal: eta = 1.0 / (t + t0) [default] invscaling: eta = eta0 / pow(t, power_t) where t0 is chosen by a heuristic proposed by Leon Bottou. eta0 : double The initial learning rate for the 'constant' or 'invscaling' schedules. 
The default value is 0.0 as eta0 is not used by the default schedule 'optimal'. power_t : double The exponent for inverse scaling learning rate [default 0.5]. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\ n_features) Weights assigned to the features. intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,) Constants in decision function. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> Y = np.array([1, 1, 2, 2]) >>> clf = linear_model.SGDClassifier() >>> clf.fit(X, Y) ... #doctest: +NORMALIZE_WHITESPACE SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=True, verbose=0, warm_start=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- LinearSVC, LogisticRegression, Perceptron """ def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None, learning_rate="optimal", eta0=0.0, power_t=0.5, class_weight=None, warm_start=False, average=False): super(SGDClassifier, self).__init__( loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, class_weight=class_weight, warm_start=warm_start, average=average) def _check_proba(self): check_is_fitted(self, "t_") if self.loss not in ("log", "modified_huber"): raise AttributeError("probability estimates are not available for" " loss=%r" % self.loss) @property def predict_proba(self): """Probability estimates. This method is only available for log loss and modified Huber loss. Multiclass probability estimates are derived from binary (one-vs.-rest) estimates by simple normalization, as recommended by Zadrozny and Elkan. Binary probability estimates for loss="modified_huber" are given by (clip(decision_function(X), -1, 1) + 1) / 2. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. 
References ---------- Zadrozny and Elkan, "Transforming classifier scores into multiclass probability estimates", SIGKDD'02, http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf The justification for the formula in the loss="modified_huber" case is in the appendix B in: http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf """ self._check_proba() return self._predict_proba def _predict_proba(self, X): if self.loss == "log": return self._predict_proba_lr(X) elif self.loss == "modified_huber": binary = (len(self.classes_) == 2) scores = self.decision_function(X) if binary: prob2 = np.ones((scores.shape[0], 2)) prob = prob2[:, 1] else: prob = scores np.clip(scores, -1, 1, prob) prob += 1. prob /= 2. if binary: prob2[:, 0] -= prob prob = prob2 else: # the above might assign zero to all classes, which doesn't # normalize neatly; work around this to produce uniform # probabilities prob_sum = prob.sum(axis=1) all_zero = (prob_sum == 0) if np.any(all_zero): prob[all_zero, :] = 1 prob_sum[all_zero] = len(self.classes_) # normalize prob /= prob_sum.reshape((prob.shape[0], -1)) return prob else: raise NotImplementedError("predict_(log_)proba only supported when" " loss='log' or loss='modified_huber' " "(%r given)" % self.loss) @property def predict_log_proba(self): """Log of probability estimates. This method is only available for log loss and modified Huber loss. When loss="modified_huber", probability estimates may be hard zeros and ones, so taking the logarithm is not possible. See ``predict_proba`` for details. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- T : array-like, shape (n_samples, n_classes) Returns the log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ self._check_proba() return self._predict_log_proba def _predict_log_proba(self, X): return np.log(self.predict_proba(X)) class BaseSGDRegressor(BaseSGD, RegressorMixin): loss_functions = { "squared_loss": (SquaredLoss, ), "huber": (Huber, DEFAULT_EPSILON), "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), } @abstractmethod def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average) def _partial_fit(self, X, y, alpha, C, loss, learning_rate, n_iter, sample_weight, coef_init, intercept_init): X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64) y = astype(y, np.float64, copy=False) n_samples, n_features = X.shape self._validate_params() # Allocate datastructures from input arguments sample_weight = self._validate_sample_weight(sample_weight, n_samples) if self.coef_ is None: self._allocate_parameter_mem(1, n_features, coef_init, intercept_init) elif n_features != self.coef_.shape[-1]: raise ValueError("Number of features %d does not match previous data %d." 
% (n_features, self.coef_.shape[-1])) if self.average > 0 and self.average_coef_ is None: self.average_coef_ = np.zeros(n_features, dtype=np.float64, order="C") self.average_intercept_ = np.zeros(1, dtype=np.float64, order="C") self._fit_regressor(X, y, alpha, C, loss, learning_rate, sample_weight, n_iter) return self def partial_fit(self, X, y, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data y : numpy array of shape (n_samples,) Subset of target values sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : returns an instance of self. """ return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, n_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None) def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None, intercept_init=None, sample_weight=None): if self.warm_start and self.coef_ is not None: if coef_init is None: coef_init = self.coef_ if intercept_init is None: intercept_init = self.intercept_ else: self.coef_ = None self.intercept_ = None if self.average > 0: self.standard_intercept_ = self.intercept_ self.standard_coef_ = self.coef_ self.average_coef_ = None self.average_intercept_ = None # Clear iteration count for multiple call to fit. self.t_ = None return self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter, sample_weight, coef_init, intercept_init) def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): """Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values coef_init : array, shape (n_features,) The initial coefficients to warm-start the optimization. intercept_init : array, shape (1,) The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : returns an instance of self. """ return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight) @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ return self._decision_function(X) def _decision_function(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. """ check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all) X = check_array(X, accept_sparse='csr') scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Returns ------- array, shape (n_samples,) Predicted target values per element in X. 
""" return self._decision_function(X) def _fit_regressor(self, X, y, alpha, C, loss, learning_rate, sample_weight, n_iter): dataset, intercept_decay = make_dataset(X, y, sample_weight) loss_function = self._get_loss_function(loss) penalty_type = self._get_penalty_type(self.penalty) learning_rate_type = self._get_learning_rate_type(learning_rate) if self.t_ is None: self.t_ = 1.0 random_state = check_random_state(self.random_state) # numpy mtrand expects a C long which is a signed 32 bit integer under # Windows seed = random_state.randint(0, np.iinfo(np.int32).max) if self.average > 0: self.standard_coef_, self.standard_intercept_, \ self.average_coef_, self.average_intercept_ =\ average_sgd(self.standard_coef_, self.standard_intercept_[0], self.average_coef_, self.average_intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay, self.average) self.average_intercept_ = np.atleast_1d(self.average_intercept_) self.standard_intercept_ = np.atleast_1d(self.standard_intercept_) self.t_ += n_iter * X.shape[0] if self.average <= self.t_ - 1.0: self.coef_ = self.average_coef_ self.intercept_ = self.average_intercept_ else: self.coef_ = self.standard_coef_ self.intercept_ = self.standard_intercept_ else: self.coef_, self.intercept_ = \ plain_sgd(self.coef_, self.intercept_[0], loss_function, penalty_type, alpha, C, self.l1_ratio, dataset, n_iter, int(self.fit_intercept), int(self.verbose), int(self.shuffle), seed, 1.0, 1.0, learning_rate_type, self.eta0, self.power_t, self.t_, intercept_decay) self.t_ += n_iter * X.shape[0] self.intercept_ = np.atleast_1d(self.intercept_) class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin): """Linear model fitted by minimizing a regularized empirical loss with SGD SGD stands for Stochastic Gradient Descent: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a decreasing strength schedule (aka learning rate). The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using either the squared euclidean norm L2 or the absolute norm L1 or a combination of both (Elastic Net). If the parameter update crosses the 0.0 value because of the regularizer, the update is truncated to 0.0 to allow for learning sparse models and achieve online feature selection. This implementation works with data represented as dense numpy arrays of floating point values for the features. Read more in the :ref:`User Guide <sgd>`. Parameters ---------- loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \ or 'squared_epsilon_insensitive' The loss function to be used. Defaults to 'squared_loss' which refers to the ordinary least squares fit. 'huber' modifies 'squared_loss' to focus less on getting outliers correct by switching from squared to linear loss past a distance of epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is linear past that; this is the loss function used in SVR. 'squared_epsilon_insensitive' is the same but becomes squared loss past a tolerance of epsilon. penalty : str, 'none', 'l2', 'l1', or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to 'l2' which is the standard regularizer for linear SVM models. 'l1' and 'elasticnet' might bring sparsity to the model (feature selection) not achievable with 'l2'. 
alpha : float Constant that multiplies the regularization term. Defaults to 0.0001 l1_ratio : float The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Defaults to 0.15. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). The number of iterations is set to 1 if using partial_fit. Defaults to 5. shuffle : bool, optional Whether or not the training data should be shuffled after each epoch. Defaults to True. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level. epsilon : float Epsilon in the epsilon-insensitive loss functions; only if `loss` is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. For 'huber', determines the threshold at which it becomes less important to get the prediction exactly right. For epsilon-insensitive, any differences between the current prediction and the correct label are ignored if they are less than this threshold. learning_rate : string, optional The learning rate: constant: eta = eta0 optimal: eta = 1.0/(alpha * t) invscaling: eta = eta0 / pow(t, power_t) [default] eta0 : double, optional The initial learning rate [default 0.01]. power_t : double, optional The exponent for inverse scaling learning rate [default 0.25]. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So ``average=10 will`` begin averaging after seeing 10 samples. Attributes ---------- coef_ : array, shape (n_features,) Weights assigned to the features. intercept_ : array, shape (1,) The intercept term. average_coef_ : array, shape (n_features,) Averaged weights assigned to the features. average_intercept_ : array, shape (1,) The averaged intercept term. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = linear_model.SGDRegressor() >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25, random_state=None, shuffle=True, verbose=0, warm_start=False) See also -------- Ridge, ElasticNet, Lasso, SVR """ def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001, l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, random_state=None, learning_rate="invscaling", eta0=0.01, power_t=0.25, warm_start=False, average=False): super(SGDRegressor, self).__init__(loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, epsilon=epsilon, random_state=random_state, learning_rate=learning_rate, eta0=eta0, power_t=power_t, warm_start=warm_start, average=average)
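# ---------------------------------------------------------------------------
# Minimal usage sketch of the estimators defined above (illustrative only; it
# assumes this module is importable as ``sklearn.linear_model`` and uses the
# loss names of this version, e.g. "log" rather than the later "log_loss").
# It shows the out-of-core pattern documented in ``partial_fit`` and the
# ``predict_proba`` property guarded by ``_check_proba``.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 4)
    y_demo = (X_demo[:, 0] + X_demo[:, 1] > 0).astype(int)

    clf = SGDClassifier(loss="log", random_state=0)
    all_classes = np.unique(y_demo)   # required on the first partial_fit call
    for start in range(0, 100, 20):   # feed the data in mini-batches
        sl = slice(start, start + 20)
        clf.partial_fit(X_demo[sl], y_demo[sl], classes=all_classes)

    # probability estimates are available because loss="log"
    print(clf.predict_proba(X_demo[:3]))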
bsd-3-clause
UNR-AERIAL/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
226
1384
"""
===================================================
Recursive feature elimination with cross-validation
===================================================

A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)

import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification

# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)

# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
              scoring='accuracy')
rfecv.fit(X, y)

print("Optimal number of features : %d" % rfecv.n_features_)

# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
bsd-3-clause
3manuek/scikit-learn
examples/gaussian_process/plot_gp_regression.py
253
4054
#!/usr/bin/python
# -*- coding: utf-8 -*-

r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================

A simple one-dimensional regression exercise computed in two different ways:

1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model

In both cases, the model parameters are estimated using the maximum
likelihood principle.

The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.

Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically
equivalent to a normalized variance; that is

.. math::
   \mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2

"""
print(__doc__)

# Author: Vincent Dubourg <[email protected]>
#         Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause

import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl

np.random.seed(1)


def f(x):
    """The function to predict."""
    return x * np.sin(x)

#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T

# Observations
y = f(X).ravel()

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                        (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T

# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                        (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

pl.show()
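# Quick numeric check (illustrative sketch reusing the variables from the
# noisy case above): how often the true function f(x) falls inside the
# plotted pointwise 95% interval.
lower = y_pred - 1.9600 * sigma
upper = y_pred + 1.9600 * sigma
f_true = f(x).ravel()
inside = (f_true >= lower) & (f_true <= upper)
print("Empirical coverage of the 95%% interval: %.1f%%"
      % (100.0 * inside.mean()))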
bsd-3-clause
snnn/tensorflow
tensorflow/python/estimator/canned/dnn_linear_combined_test.py
3
40490
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dnn_linear_combined.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile from absl.testing import parameterized import numpy as np import six from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.estimator import estimator from tensorflow.python.estimator.canned import dnn_linear_combined from tensorflow.python.estimator.canned import dnn_testing_utils from tensorflow.python.estimator.canned import linear_testing_utils from tensorflow.python.estimator.canned import prediction_keys from tensorflow.python.estimator.export import export from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.estimator.inputs import pandas_io from tensorflow.python.feature_column import feature_column from tensorflow.python.feature_column import feature_column_v2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import gradient_descent from tensorflow.python.training import input as input_lib from tensorflow.python.training import optimizer as optimizer_lib try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn) def _dnn_only_model_fn(self, features, labels, mode, head, hidden_units, feature_columns, optimizer='Adagrad', activation_fn=nn.relu, dropout=None, input_layer_partitioner=None, config=None): return dnn_linear_combined._dnn_linear_combined_model_fn( features=features, labels=labels, mode=mode, head=head, linear_feature_columns=[], dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, dnn_activation_fn=activation_fn, dnn_dropout=dropout, input_layer_partitioner=input_layer_partitioner, config=config) # A function to mimic linear-regressor init reuse same tests. 
def _linear_regressor_fn(feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Ftrl', config=None, partitioner=None, sparse_combiner='sum'): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=partitioner, config=config, linear_sparse_combiner=sparse_combiner) class LinearOnlyRegressorPartitionerTest( linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column) class LinearOnlyRegressorPartitionerV2Test( linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column_v2) class LinearOnlyRegressorEvaluationTest( linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column) class LinearOnlyRegressorEvaluationV2Test( linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column_v2) class LinearOnlyRegressorPredictTest( linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPredictTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column) class LinearOnlyRegressorPredictV2Test( linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPredictTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column_v2) class LinearOnlyRegressorIntegrationTest( linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column) class LinearOnlyRegressorIntegrationV2Test( linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column_v2) class LinearOnlyRegressorTrainingTest( linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorTrainingTest.__init__( self, _linear_regressor_fn, 
fc_lib=feature_column) class LinearOnlyRegressorTrainingV2Test( linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorTrainingTest.__init__( self, _linear_regressor_fn, fc_lib=feature_column_v2) def _linear_classifier_fn(feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Ftrl', config=None, partitioner=None, sparse_combiner='sum'): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=partitioner, config=config, linear_sparse_combiner=sparse_combiner) class LinearOnlyClassifierTrainingTest( linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierTrainingTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column) class LinearOnlyClassifierTrainingV2Test( linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierTrainingTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column_v2) class LinearOnlyClassifierClassesEvaluationTest( linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column) class LinearOnlyClassifierClassesEvaluationV2Test( linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column_v2) class LinearOnlyClassifierPredictTest( linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierPredictTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column) class LinearOnlyClassifierPredictV2Test( linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierPredictTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column_v2) class LinearOnlyClassifierIntegrationTest( linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column) class LinearOnlyClassifierIntegrationV2Test( 
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column_v2) @parameterized.parameters((feature_column,), (feature_column_v2,)) class DNNLinearCombinedRegressorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, label_dimension, batch_size, fc_impl): linear_feature_columns = [ fc_impl.numeric_column('x', shape=(input_dimension,)) ] dnn_feature_columns = [ fc_impl.numeric_column('x', shape=(input_dimension,)) ] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, label_dimension=label_dimension, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predictions = np.array([ x[prediction_keys.PredictionKeys.PREDICTIONS] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, label_dimension), predictions.shape) # EXPORT feature_spec = fc_impl.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self, fc_impl): """Tests complete flow with numpy_input_fn.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size, fc_impl=fc_impl) def test_pandas_input_fn(self, fc_impl): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return label_dimension = 1 batch_size = 10 data = np.linspace(0., 2., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(data) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size, fc_impl=fc_impl) def test_input_fn_from_parse_example(self, fc_impl): """Tests 
complete flow with input_fn constructed from parse_example.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), 'y': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size, fc_impl=fc_impl) # A function to mimic dnn-classifier init reuse same tests. def _dnn_classifier_fn(hidden_units, feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyClassifierEvaluateTest( dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column) class DNNOnlyClassifierEvaluateV2Test( dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column_v2) class DNNOnlyClassifierPredictTest( dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierPredictTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column) class DNNOnlyClassifierPredictV2Test( dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierPredictTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column_v2) class DNNOnlyClassifierTrainTest( 
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierTrainTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column) class DNNOnlyClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierTrainTest.__init__( self, _dnn_classifier_fn, fc_impl=feature_column_v2) # A function to mimic dnn-regressor init reuse same tests. def _dnn_regressor_fn(hidden_units, feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyRegressorEvaluateTest( dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column) class DNNOnlyRegressorEvaluateV2Test( dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column_v2) class DNNOnlyRegressorPredictTest( dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorPredictTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column) class DNNOnlyRegressorPredictV2Test( dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorPredictTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column_v2) class DNNOnlyRegressorTrainTest( dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorTrainTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column) class DNNOnlyRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorTrainTest.__init__( self, _dnn_regressor_fn, fc_impl=feature_column_v2) @parameterized.parameters((feature_column,), (feature_column_v2,)) class DNNLinearCombinedClassifierIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _as_label(self, data_in_float): return np.rint(data_in_float).astype(np.int64) def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, n_classes, batch_size, fc_impl): 
linear_feature_columns = [ fc_impl.numeric_column('x', shape=(input_dimension,)) ] dnn_feature_columns = [ fc_impl.numeric_column('x', shape=(input_dimension,)) ] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, n_classes=n_classes, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predicted_proba = np.array([ x[prediction_keys.PredictionKeys.PROBABILITIES] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, n_classes), predicted_proba.shape) # EXPORT feature_spec = fc_impl.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self, fc_impl): """Tests complete flow with numpy_input_fn.""" n_classes = 3 input_dimension = 2 batch_size = 10 data = np.linspace( 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32) x_data = data.reshape(batch_size, input_dimension) y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1))) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size, fc_impl=fc_impl) def test_pandas_input_fn(self, fc_impl): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return input_dimension = 1 n_classes = 2 batch_size = 10 data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(self._as_label(data)) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size, fc_impl=fc_impl) def test_input_fn_from_parse_example(self, fc_impl): """Tests complete flow with input_fn constructed from parse_example.""" input_dimension = 2 n_classes = 3 batch_size = 10 data = np.linspace(0., n_classes-1., batch_size * input_dimension, dtype=np.float32) data = data.reshape(batch_size, input_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=datum)), 'y': feature_pb2.Feature(int64_list=feature_pb2.Int64List( value=self._as_label(datum[:1]))), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': 
parsing_ops.FixedLenFeature([input_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([1], dtypes.int64), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size, fc_impl=fc_impl) @parameterized.parameters((feature_column,), (feature_column_v2,)) class DNNLinearCombinedTests(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _mock_optimizer(self, real_optimizer, var_name_prefix): """Verifies global_step is None and var_names start with given prefix.""" def _minimize(loss, global_step=None, var_list=None): self.assertIsNone(global_step) trainable_vars = var_list or ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) var_names = [var.name for var in trainable_vars] self.assertTrue( all([name.startswith(var_name_prefix) for name in var_names])) # var is used to check this op called by training. with ops.name_scope(''): var = variables_lib.Variable(0., name=(var_name_prefix + '_called')) with ops.control_dependencies([var.assign(100.)]): return real_optimizer.minimize(loss, global_step, var_list) optimizer_mock = test.mock.NonCallableMagicMock( spec=optimizer_lib.Optimizer, wraps=real_optimizer) optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize) return optimizer_mock def test_train_op_calls_both_dnn_and_linear(self, fc_impl): opt = gradient_descent.GradientDescentOptimizer(1.) x_column = fc_impl.numeric_column('x') input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[0.], [1.]])}, y=np.array([[0.], [1.]]), batch_size=1, shuffle=False) est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[x_column], # verifies linear_optimizer is used only for linear part. linear_optimizer=self._mock_optimizer(opt, 'linear'), dnn_hidden_units=(2, 2), dnn_feature_columns=[x_column], # verifies dnn_optimizer is used only for linear part. 
dnn_optimizer=self._mock_optimizer(opt, 'dnn'), model_dir=self._model_dir) est.train(input_fn, steps=1) # verifies train_op fires linear minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'linear_called')) # verifies train_op fires dnn minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'dnn_called')) def test_dnn_and_linear_logits_are_added(self, fc_impl): with ops.Graph().as_default(): variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights') variables_lib.Variable([2.0], name='linear/linear_model/bias_weights') variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel') variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias') variables_lib.Variable([[5.0]], name='dnn/logits/kernel') variables_lib.Variable([6.0], name='dnn/logits/bias') variables_lib.Variable(1, name='global_step', dtype=dtypes.int64) linear_testing_utils.save_variables_to_ckpt(self._model_dir) x_column = fc_impl.numeric_column('x') est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[x_column], dnn_hidden_units=[1], dnn_feature_columns=[x_column], model_dir=self._model_dir) input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[10.]])}, batch_size=1, shuffle=False) # linear logits = 10*1 + 2 = 12 # dnn logits = (10*3 + 4)*5 + 6 = 176 # logits = dnn + linear = 176 + 12 = 188 self.assertAllClose( { prediction_keys.PredictionKeys.PREDICTIONS: [188.], }, next(est.predict(input_fn=input_fn))) @parameterized.parameters((feature_column,), (feature_column_v2,)) class DNNLinearCombinedWarmStartingTest(test.TestCase): def setUp(self): # Create a directory to save our old checkpoint and vocabularies to. self._ckpt_and_vocab_dir = tempfile.mkdtemp() # Make a dummy input_fn. def _input_fn(): features = { 'age': [[23.], [31.]], 'city': [['Palo Alto'], ['Mountain View']], } return features, [0, 1] self._input_fn = _input_fn def tearDown(self): # Clean up checkpoint / vocab dir. writer_cache.FileWriterCache.clear() shutil.rmtree(self._ckpt_and_vocab_dir) def test_classifier_basic_warm_starting(self, fc_impl): """Tests correctness of DNNLinearCombinedClassifier default warm-start.""" age = fc_impl.numeric_column('age') city = fc_impl.embedding_column( fc_impl.categorical_column_with_vocabulary_list( 'city', vocabulary_list=['Mountain View', 'Palo Alto']), dimension=5) # Create a DNNLinearCombinedClassifier and train to save a checkpoint. dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], model_dir=self._ckpt_and_vocab_dir, n_classes=4, linear_optimizer='SGD', dnn_optimizer='SGD') dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1) # Create a second DNNLinearCombinedClassifier, warm-started from the first. # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't # have accumulator values that change). 
warm_started_dnn_lc_classifier = ( dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], n_classes=4, linear_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), dnn_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), warm_start_from=dnn_lc_classifier.model_dir)) warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1) for variable_name in warm_started_dnn_lc_classifier.get_variable_names(): self.assertAllClose( dnn_lc_classifier.get_variable_value(variable_name), warm_started_dnn_lc_classifier.get_variable_value(variable_name)) def test_regressor_basic_warm_starting(self, fc_impl): """Tests correctness of DNNLinearCombinedRegressor default warm-start.""" age = fc_impl.numeric_column('age') city = fc_impl.embedding_column( fc_impl.categorical_column_with_vocabulary_list( 'city', vocabulary_list=['Mountain View', 'Palo Alto']), dimension=5) # Create a DNNLinearCombinedRegressor and train to save a checkpoint. dnn_lc_regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], model_dir=self._ckpt_and_vocab_dir, linear_optimizer='SGD', dnn_optimizer='SGD') dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1) # Create a second DNNLinearCombinedRegressor, warm-started from the first. # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't # have accumulator values that change). warm_started_dnn_lc_regressor = ( dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], linear_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), dnn_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), warm_start_from=dnn_lc_regressor.model_dir)) warm_started_dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1) for variable_name in warm_started_dnn_lc_regressor.get_variable_names(): self.assertAllClose( dnn_lc_regressor.get_variable_value(variable_name), warm_started_dnn_lc_regressor.get_variable_value(variable_name)) def test_warm_starting_selective_variables(self, fc_impl): """Tests selecting variables to warm-start.""" age = fc_impl.numeric_column('age') city = fc_impl.embedding_column( fc_impl.categorical_column_with_vocabulary_list( 'city', vocabulary_list=['Mountain View', 'Palo Alto']), dimension=5) # Create a DNNLinearCombinedClassifier and train to save a checkpoint. dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], model_dir=self._ckpt_and_vocab_dir, n_classes=4, linear_optimizer='SGD', dnn_optimizer='SGD') dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1) # Create a second DNNLinearCombinedClassifier, warm-started from the first. # Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't # have accumulator values that change). warm_started_dnn_lc_classifier = ( dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age], dnn_feature_columns=[city], dnn_hidden_units=[256, 128], n_classes=4, linear_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), dnn_optimizer=gradient_descent.GradientDescentOptimizer( learning_rate=0.0), # The provided regular expression will only warm-start the deep # portion of the model. 
            warm_start_from=estimator.WarmStartSettings(
                ckpt_to_initialize_from=dnn_lc_classifier.model_dir,
                vars_to_warm_start='.*(dnn).*')))

    warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
    for variable_name in warm_started_dnn_lc_classifier.get_variable_names():
      if 'dnn' in variable_name:
        self.assertAllClose(
            dnn_lc_classifier.get_variable_value(variable_name),
            warm_started_dnn_lc_classifier.get_variable_value(variable_name))
      elif 'linear' in variable_name:
        linear_values = warm_started_dnn_lc_classifier.get_variable_value(
            variable_name)
        # Since they're not warm-started, the linear weights will be
        # zero-initialized.
        self.assertAllClose(np.zeros_like(linear_values), linear_values)


if __name__ == '__main__':
  test.main()
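The selective warm-start test above passes vars_to_warm_start='.*(dnn).*'. As a minimal, hypothetical sketch (the variable names below are illustrative, not read from a real checkpoint), the same kind of regular expression can be used to see which variables would be warm-started and which would stay freshly initialized:

import re

# Illustrative variable names mimicking the checkpoint layout exercised above.
variable_names = [
    'dnn/hiddenlayer_0/kernel',
    'dnn/hiddenlayer_0/bias',
    'dnn/logits/kernel',
    'linear/linear_model/age/weights',
    'linear/linear_model/bias_weights',
]

pattern = re.compile('.*(dnn).*')  # same expression as vars_to_warm_start

warm_started = [name for name in variable_names if pattern.match(name)]
cold_started = [name for name in variable_names if not pattern.match(name)]
print(warm_started)  # deep-portion variables only
print(cold_started)  # linear variables are left to their fresh initializers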
apache-2.0
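Before the next record, a quick sanity check of the logits arithmetic asserted in test_dnn_and_linear_logits_are_added above. This is a hand-rolled NumPy sketch using the same hard-coded checkpoint values, assuming the default ReLU hidden activation (which changes nothing here since the pre-activation is positive):

import numpy as np

x = np.array([[10.0]])

# Linear part: weight 1.0, bias 2.0  ->  10 * 1 + 2 = 12
linear_logits = x.dot(np.array([[1.0]])) + 2.0

# DNN part: hidden layer (weight 3.0, bias 4.0), logits layer (weight 5.0,
# bias 6.0)  ->  (10 * 3 + 4) * 5 + 6 = 176
hidden = np.maximum(x.dot(np.array([[3.0]])) + 4.0, 0.0)
dnn_logits = hidden.dot(np.array([[5.0]])) + 6.0

# Combined prediction: 176 + 12 = 188, the value the test expects.
print(float(linear_logits + dnn_logits))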
ashishbaghudana/relna
setup.py
1
1196
from setuptools import setup
from setuptools import find_packages


def readme():
    with open('README.md') as file:
        return file.read()


setup(
    name='relna',
    version='0.1.0',
    description='Relation Extraction Pipeline for Transcription Factor and Gene or Gene Product relations',
    long_description=readme(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.4',
        'Topic :: Text Processing :: Linguistic'
    ],
    keywords='svm relation extraction nlp natural language ner transcription factor gene product',
    url='https://github.com/ashishbaghudana/relna',
    author='Ashish Baghudana',
    author_email='[email protected]',
    license='MIT',
    packages=find_packages(exclude=['tests']),
    install_requires=[
        'nala',
        'nltk',
        'beautifulsoup4',
        'requests',
        'spacy',
        'progress'],
        # 'matplotlib' # TODO Figure out if we need this, since we might not want this huge dependency
    #],
    include_package_data=True,
    zip_safe=False,
    test_suite='nose.collector',
    setup_requires=['nose>=1.0'],
)
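As a small illustration of the packaging call above, find_packages(exclude=['tests']) can be inspected on its own; the output is hypothetical and depends on the actual checkout layout:

from setuptools import find_packages

# Run from the repository root (next to setup.py): lists every package that
# setup() would ship, skipping the excluded top-level 'tests' package.
packages = find_packages(exclude=['tests'])
print(packages)  # e.g. ['relna', 'relna.learning', ...] -- illustrative only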
mit
AlexanderFabisch/scikit-learn
sklearn/linear_model/tests/test_ransac.py
216
13290
import numpy as np from numpy.testing import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises_regexp from scipy import sparse from sklearn.utils.testing import assert_less from sklearn.linear_model import LinearRegression, RANSACRegressor from sklearn.linear_model.ransac import _dynamic_max_trials # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) 
ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), np.zeros(100)) def test_ransac_resid_thresh_no_inliers(): # When residual_threshold=0.0 there are no inliers and a # ValueError with a message should be raised base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.0, random_state=0) assert_raises_regexp(ValueError, "No inliers.*residual_threshold.*0\.0", ransac_estimator.fit, X, y) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. 
and Zisserman, A., 2004,
    # Multiple View Geometry in Computer Vision, Second Edition,
    # Cambridge University Press, ISBN: 0521540518

    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # e = 0%, min_samples = 10
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=-0.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       stop_probability=1.1)
    assert_raises(ValueError, ransac_estimator.fit, X, y)
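The hand-calculated values in test_ransac_dynamic_max_trials follow the standard RANSAC trial-count formula N = ceil(log(1 - p) / log(1 - w**m)), where w is the inlier ratio, m is min_samples and p is the stop probability. A small sketch of that formula (my own re-derivation, not scikit-learn's implementation) reproduces the table:

import numpy as np

def expected_max_trials(n_inliers, n_samples, min_samples, probability):
    # Smallest N such that 1 - (1 - w**min_samples)**N >= probability.
    w = n_inliers / float(n_samples)
    nom = 1 - probability
    denom = 1 - w ** min_samples
    if nom == 0:       # probability == 1: certainty is never reached
        return float('inf')
    if denom == 0:     # every sample is an inlier: one trial is enough
        return 1
    return int(np.ceil(np.log(nom) / np.log(denom)))

print(expected_max_trials(95, 100, 2, 0.99))   # 2
print(expected_max_trials(50, 100, 2, 0.99))   # 17
print(expected_max_trials(70, 100, 8, 0.99))   # 78
print(expected_max_trials(50, 100, 8, 0.99))   # 1177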
bsd-3-clause
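For context on the record above, here is a minimal end-to-end sketch of the same fixture (a line y = 0.2 x + 20 with three corrupted points) and the RANSACRegressor call used throughout; it is written against the older scikit-learn API these tests target, where the base estimator is passed positionally:

import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
data[10, :] = (1000, 1000)     # corrupt three points, as in the tests
data[30, :] = (-1000, -1000)
data[200, :] = (-100, -50)
X, y = data[:, :1], data[:, 1]

ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                         residual_threshold=5, random_state=0)
ransac.fit(X, y)
print(ransac.estimator_.coef_, ransac.estimator_.intercept_)  # ~0.2 and ~20
print(np.flatnonzero(~ransac.inlier_mask_))                   # [10 30 200]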
mathieuaubry/features_analysis
cnn_statistics.py
1
7598
from skimage.transform import resize from image_generator import ImageGenerator import matplotlib.pyplot as plt import numpy as np import os import time import json import sys import caffe import scipy import math import shutil import string from scipy import ndimage from scipy.sparse.linalg import eigs def var_sep(generator,net,results_folder,test_layers): # compute the relative variance along the two directions of variations without # computing the covariance matrix if not os.path.exists(results_folder): os.makedirs(results_folder) if not os.path.exists(results_folder+'/pca'): os.makedirs(results_folder+'/pca') translation=string.maketrans('/', '_') var={} mean={} size={} var_dim1={} var_dim2={} tmp=np.zeros([256,256,3]) caffe_input = np.asarray([net.transformer.preprocess('data', tmp)]) results=net.forward_all(data=caffe_input,blobs=test_layers) t = time.time() for layer in test_layers: size[layer]=len(results[layer].flatten()) var[layer] = 0 mean[layer] = np.zeros([size[layer] ]) var_dim1[layer]=0 var_dim2[layer]=0 N_dim1=generator.N1 t = time.time() for instance_id in range(0,N_dim1): caffe_input = np.asarray([net.transformer.preprocess('data', in_) for in_ in generator.generator()]) results=net.forward_all(data=caffe_input,blobs=test_layers) for layer in test_layers: R=np.reshape(results[layer],[generator.N2,size[layer]]) mean[layer]=mean[layer]+R var[layer]=var[layer]+np.mean(np.sum(R*R,axis=1)) m=np.mean(R,axis=0) v=np.sum(m*m) var_dim2[layer]=var_dim2[layer]+v print('\rcomputed '+str(instance_id+1)+' instances in ' +str(time.time()-t)+' seconds'), print('\r') for layer in test_layers: layer_t=layer.translate(translation) mean[layer]=mean[layer]/N_dim1 mean_tot=np.mean(mean[layer],axis=0) var_dim1= np.sum(mean[layer]*mean[layer])/generator.N2-np.sum(mean_tot*mean_tot) var[layer]=var[layer]/N_dim1-np.sum(mean_tot*mean_tot) var_dim2[layer]=var_dim2[layer]/N_dim1-np.sum(mean_tot*mean_tot) var_x=var[layer]-var_dim2[layer]-var_dim1 print('Variance repartition layer '+layer+' :') print('Dimension 1 : '+'%.1f'%(100*var_dim1/var[layer])+'%') print('Dimension 2 : '+'%.1f'%(100*var_dim2[layer]/var[layer])+'%') print('Residual : '+'%.1f'%(100*var_x/var[layer])+'%') return def pca(generator,net,results_folder,test_layers,N_projections=100): if not os.path.exists(results_folder): os.makedirs(results_folder) if not os.path.exists(results_folder+'/pca'): os.makedirs(results_folder+'/pca') translation=string.maketrans('/', '_') var={} mean={} size={} var_dim1={} var_dim2={} tmp=np.zeros([256,256,3]) caffe_input = np.asarray([net.transformer.preprocess('data', tmp)]) results=net.forward_all(data=caffe_input,blobs=test_layers) for layer in test_layers: size[layer]=len(results[layer].flatten()) var[layer] = np.zeros([size[layer],size[layer] ]) mean[layer] = np.zeros([size[layer] ]) var_dim1[layer]=np.zeros([size[layer],size[layer] ]) var_dim2[layer]=np.zeros([size[layer],size[layer] ]) print('') t = time.time() N_dim1=generator.N1 for instance_id in range(0,N_dim1): caffe_input = np.asarray([net.transformer.preprocess('data', in_) for in_ in generator.generator()]) results=net.forward_all(data=caffe_input,blobs=test_layers) for layer in test_layers: R=np.reshape(results[layer],[generator.N2,size[layer]]) mean[layer]=mean[layer]+R v=np.dot(R.transpose(),R)/generator.N2 var[layer]=var[layer]+v m=np.mean(R,axis=0) var_dim2[layer]=var_dim2[layer]+np.outer(m,m) print('\rcomputed '+str(instance_id+1)+' instances in ' +str(time.time()-t)+' seconds'), print('\r'), for layer in test_layers: 
        layer_t=layer.translate(translation)
        mean[layer]=mean[layer]/N_dim1
        mean_tot=np.mean(mean[layer],axis=0)
        mean[layer]=mean[layer]-np.tile(np.reshape(mean_tot,[1, len(mean_tot)]),[generator.N2,1])
        var_dim1= np.dot(mean[layer].transpose(),mean[layer])/generator.N2

        eig_vals, eig_vecs =eigs(var_dim1,k=min(N_projections,var_dim1.shape[0]-2))
        eig_vals=np.real(eig_vals)
        order=np.argsort(eig_vals)[::-1]
        eig_vals=eig_vals[order]
        eig_vecs=eig_vecs[:,order]
        np.save(results_folder+'/pca/evecs_dim1_layer_'+layer_t,eig_vecs)
        np.save(results_folder+'/pca/evals_dim1_layer_'+layer_t,eig_vals)
        v=eig_vals/eig_vals.sum()
        v=np.cumsum(v)
        ind=0
        while ((ind<(len(v)-1)) & (v[ind]<0.95)):
            ind=ind+1
        dim_dim1=ind+1

        var_dim2[layer]=var_dim2[layer]/N_dim1-np.outer(mean_tot,mean_tot)
        eig_vals, eig_vecs =eigs(var_dim2[layer],k=min(N_projections,var_dim2[layer].shape[0]-2))
        eig_vals=np.real(eig_vals)
        order=np.argsort(eig_vals)[::-1]
        eig_vals=eig_vals[order]
        eig_vecs=eig_vecs[:,order]
        np.save(results_folder+'/pca/evecs_dim2_layer_'+layer_t,eig_vecs)
        np.save(results_folder+'/pca/evals_dim2_layer_'+layer_t,eig_vals)
        v=eig_vals/eig_vals.sum()
        v=np.cumsum(v)
        ind=0
        while ((ind<(len(v)-1)) & (v[ind]<0.95)):
            ind=ind+1
        dim_dim2=ind+1

        var[layer]=var[layer]/N_dim1-np.outer(mean_tot,mean_tot)
        eig_vals, eig_vecs =eigs(var[layer],k=min(N_projections,var[layer].shape[0]-2))
        eig_vals=np.real(eig_vals)
        order=np.argsort(eig_vals)[::-1]
        eig_vals=eig_vals[order]
        eig_vecs=eig_vecs[:,order]
        np.save(results_folder+'/pca/evecs_layer_'+layer_t,eig_vecs)
        np.save(results_folder+'/pca/evals_layer_'+layer_t,eig_vals)
        v=eig_vals/eig_vals.sum()
        v=np.cumsum(v)
        ind=0
        while ((ind<(len(v)-1)) & (v[ind]<0.95)):
            ind=ind+1
        dim_tot=ind+1

        var_x=var[layer]-var_dim2[layer]-var_dim1
        eig_vals, eig_vecs =eigs(var_x,k=min(N_projections,var_x.shape[0]-2))
        eig_vals=np.real(eig_vals)
        order=np.argsort(eig_vals)[::-1]
        eig_vals=eig_vals[order]
        eig_vecs=eig_vecs[:,order]
        np.save(results_folder+'/pca/evecs_x_layer_'+layer_t,eig_vecs)
        np.save(results_folder+'/pca/evals_x_layer_'+layer_t,eig_vals)
        v=eig_vals/eig_vals.sum()
        v=np.cumsum(v)
        ind=0
        while ((ind<(len(v)-1)) & (v[ind]<0.95)):
            ind=ind+1
        dim_x=ind+1

        print('\r'),
        print('Variance repartition layer '+layer+' :')
        print('Dimension 1 : '+'%.1f'%(100*np.trace(var_dim1)/np.trace(var[layer]))+'%')
        print('Dimension 2 : '+'%.1f'%(100*np.trace(var_dim2[layer])/np.trace(var[layer]))+'%')
        print('Residual : '+'%.1f'%(100*np.trace(var_x)/np.trace(var[layer]))+'%')
        print('Dimensions explaining 95% of the variance, layer '+layer+' (Warning: this quantity makes sense only if enough data are provided) :')
        print('Dimension 1 : '+'%s'%(dim_dim1))
        print('Dimension 2 : '+'%s'%(dim_dim2))
        print('Residual : '+'%s'%(dim_x))
        print('Full : '+'%s'%(dim_tot))
        print('pca layer '+layer+' in ' +str(time.time()-t)+' seconds'),
        t = time.time()
    print('\r')
    return
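The percentages printed by var_sep and pca above come from a two-factor variance decomposition: the total feature variance splits into a part explained by one factor of variation, a part explained by the other, and a residual. A self-contained toy sketch of that decomposition (shapes and factor names are illustrative only):

import numpy as np

rng = np.random.RandomState(0)
N1, N2, D = 30, 40, 5                    # factor-1 values, factor-2 values, feature dim
F = (rng.randn(N1, 1, D)                 # contribution depending only on factor 1
     + rng.randn(1, N2, D)               # contribution depending only on factor 2
     + 0.3 * rng.randn(N1, N2, D))       # residual / interaction

mu = F.mean(axis=(0, 1))                 # grand mean feature vector
var_total = ((F - mu) ** 2).sum(axis=-1).mean()

mu1 = F.mean(axis=1)                     # per-factor-1 means, shape (N1, D)
mu2 = F.mean(axis=0)                     # per-factor-2 means, shape (N2, D)
var_factor1 = ((mu1 - mu) ** 2).sum(axis=-1).mean()
var_factor2 = ((mu2 - mu) ** 2).sum(axis=-1).mean()
var_residual = var_total - var_factor1 - var_factor2

for name, v in [('Dimension 1', var_factor1), ('Dimension 2', var_factor2),
                ('Residual', var_residual)]:
    print('%s : %.1f%%' % (name, 100 * v / var_total))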
mit
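A short companion sketch for the "dimensions explaining 95% of the variance" counts reported by pca() in the record above: the count is simply the number of leading eigenvalues whose cumulative share first reaches 0.95 (toy covariance matrix below, with eigs replaced by a dense eigensolver for simplicity):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(200, 50)
cov = np.cov(A, rowvar=False)                   # toy 50 x 50 covariance

eig_vals = np.linalg.eigvalsh(cov)[::-1]        # eigenvalues, descending
cum_share = np.cumsum(eig_vals) / eig_vals.sum()
n_dims_95 = int(np.searchsorted(cum_share, 0.95) + 1)
print(n_dims_95)   # leading components needed to cover 95% of the variance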
arjoly/scikit-learn
sklearn/tests/test_cross_validation.py
3
46513
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy import stats from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.datasets import make_multilabel_classification from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.multiclass import OneVsRestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. 
/ (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} X = np.ones((10, 2)) X_sparse = coo_matrix(X) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) # avoid StratifiedKFold's Warning about least populated class in y y = np.arange(10) % 3 ############################################################################## # Tests def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, expected_n_iter=None, n_samples=None): # Check that a all the samples appear at least once in a test fold if expected_n_iter is not None: assert_equal(len(cv), expected_n_iter) else: expected_n_iter = len(cv) collected_test_samples = set() iterations = 0 for train, test in cv: check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_iter) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.KFold, 3, 4) # Check that a warning is raised if the least populated class has too few # members. y = [3, 3, -1, -1, 2] cv = assert_warns_message(Warning, "The least populated class", cval.StratifiedKFold, y, 3) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y)) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) assert_raises(ValueError, cval.StratifiedKFold, y, 0) assert_raises(ValueError, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = 
iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle) for i in range(11, 17)]: sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), skf.n) def test_shuffle_kfold(): # Check the indices are shuffled properly, and that all indices are # returned in the different test folds kf = cval.KFold(300, 3, shuffle=True, random_state=0) ind = np.arange(300) all_folds = None for train, test in kf: assert_true(np.any(np.arange(100) != ind[test])) assert_true(np.any(np.arange(100, 200) != ind[test])) assert_true(np.any(np.arange(200, 300) != ind[test])) if all_folds is None: all_folds = ind[test].copy() else: all_folds = np.concatenate((all_folds, ind[test])) all_folds.sort() assert_array_equal(all_folds, ind) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage labels = [0] * 20 + [1] * 20 kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0)) kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1)) for (_, test0), (_, test1) in zip(kf0, kf1): assert_true(set(test0) != set(test1)) check_cv_coverage(kf0, expected_n_iter=5, n_samples=40) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. 
We can highlight this fact be computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.96) than than the non # shuffling variant (around 0.86). digits = load_digits() X, y = digits.data[:800], digits.target[:800] model = SVC(C=10, gamma=0.005) n = len(y) cv = cval.KFold(n, 5, shuffle=False) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = cval.KFold(n, 5, shuffle=True, random_state=0) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) cv = cval.KFold(n, 5, shuffle=True, random_state=1) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_label_kfold(): rng = np.random.RandomState(0) # Parameters of the test n_labels = 15 n_samples = 1000 n_folds = 5 # Construct the test data tolerance = 0.05 * n_samples # 5 percent error allowed labels = rng.randint(0, n_labels, n_samples) folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split labels = np.asarray(labels, dtype=object) for train, test in cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Construct the test data labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean', 'Francis', 'Robert', 'Michel', 'Rachel', 'Lois', 'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean', 'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix', 'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky', 'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'] labels = np.asarray(labels, dtype=object) n_labels = len(np.unique(labels)) n_samples = len(labels) n_folds = 5 tolerance = 0.05 * n_samples # 5 percent error allowed folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split for train, test in 
cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Should fail if there are more folds than labels labels = np.array([1, 1, 1, 2, 2]) assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8) # Train size or test size too small assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50) ] for y in ys: sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0) for train, test in sss: assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(y[train].size + y[test].size, y.size) assert_array_equal(np.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_iter = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: p = bf.pmf(count) assert_true(p > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): labels = np.array((n_samples // 2) * [0, 1]) splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter, test_size=1. 
/ n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits = 0 for train, test in splits: n_splits += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits, n_iter) assert_equal(len(train), splits.n_train) assert_equal(len(test), splits.n_test) assert_equal(len(set(train).intersection(test)), 0) label_counts = np.unique(labels) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(splits.n_train + splits.n_test, len(labels)) assert_equal(len(label_counts), 2) ex_test_p = float(splits.n_test) / n_samples ex_train_p = float(splits.n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_label_shuffle_split(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), ] for y in ys: n_iter = 6 test_size = 1. / 3 slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert_equal(len(slo), n_iter) y_unique = np.unique(y) for train, test in slo: # First test: no train label is in the test set and vice versa y_train_unique = np.unique(y[train]) y_test_unique = np.unique(y[test]) assert_false(np.any(np.in1d(y[train], y_test_unique))) assert_false(np.any(np.in1d(y[test], y_train_unique))) # Second test: train and test add up to all the data assert_equal(y[train].size + y[test].size, y.size) # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test labels are correct, # +- 1 for rounding error assert_true(abs(len(y_test_unique) - round(test_size * len(y_unique))) <= 1) assert_true(abs(len(y_train_unique) - round((1.0 - test_size) * len(y_unique))) <= 1) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) 
assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = 
make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, 
[0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error") expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(mse_scores, expected_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = 
cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] def test_shufflesplit_errors(): assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1, train_size=0.95) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3) assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None, train_size=None) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = cval.ShuffleSplit(10, random_state=21) assert_array_equal(list(a for a, b in ss), list(a for a, b in ss)) def test_safe_split_with_precomputed_kernel(): clf = SVC() clfp = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target K = np.dot(X, X.T) cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0) tr, te = list(cv)[0] X_tr, y_tr = cval._safe_split(clf, X, y, tr) K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr) assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T)) X_te, y_te = cval._safe_split(clf, X, y, te, tr) K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr) assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T)) def test_cross_val_score_allow_nans(): # Check that cross_val_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.cross_val_score(p, X, y, cv=5) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) cval.train_test_split(X, y, test_size=0.2, random_state=42) def test_permutation_test_score_allow_nans(): # Check that permutation_test_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.permutation_test_score(p, X, y, cv=5) def test_check_cv_return_types(): X = np.ones((9, 2)) cv = cval.check_cv(3, X, classifier=False) assert_true(isinstance(cv, cval.KFold)) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = cval.check_cv(3, X, y_binary, classifier=True) assert_true(isinstance(cv, 
cval.StratifiedKFold)) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = cval.check_cv(3, X, y_multiclass, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) X = np.ones((5, 2)) y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]] cv = cval.check_cv(3, X, y_multilabel, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = cval.check_cv(3, X, y_multioutput, classifier=True) assert_true(isinstance(cv, cval.KFold)) def test_cross_val_score_multilabel(): X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1], [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]]) y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]) clf = KNeighborsClassifier(n_neighbors=1) scoring_micro = make_scorer(precision_score, average='micro') scoring_macro = make_scorer(precision_score, average='macro') scoring_samples = make_scorer(precision_score, average='samples') score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5) score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5) score_samples = cval.cross_val_score(clf, X, y, scoring=scoring_samples, cv=5) assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) def test_cross_val_predict(): boston = load_boston() X, y = boston.data, boston.target cv = cval.KFold(len(boston.target)) est = Ridge() # Naive loop (should be same as cross_val_predict): preds2 = np.zeros_like(y) for train, test in cv: est.fit(X[train], y[train]) preds2[test] = est.predict(X[test]) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_array_almost_equal(preds, preds2) preds = cval.cross_val_predict(est, X, y) assert_equal(len(preds), len(y)) cv = cval.LeaveOneOut(len(y)) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_equal(len(preds), len(y)) Xsp = X.copy() Xsp *= (Xsp > np.median(Xsp)) Xsp = coo_matrix(Xsp) preds = cval.cross_val_predict(est, Xsp, y) assert_array_almost_equal(len(preds), len(y)) preds = cval.cross_val_predict(KMeans(), X) assert_equal(len(preds), len(y)) def bad_cv(): for i in range(4): yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv()) def test_cross_val_predict_input_types(): clf = Ridge() # Smoke test predictions = cval.cross_val_predict(clf, X, y) assert_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_equal(predictions.shape, (10, 2)) predictions = cval.cross_val_predict(clf, X_sparse, y) assert_array_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_array_equal(predictions.shape, (10, 2)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) predictions = cval.cross_val_predict(clf, X, y.tolist()) # test with 3d X and X_3d = X[:, :, np.newaxis] check_3d = lambda x: x.ndim == 3 clf = CheckingClassifier(check_X=check_3d) predictions = cval.cross_val_predict(clf, X_3d, y) assert_array_equal(predictions.shape, (10,)) def test_cross_val_predict_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] 
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass

    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)


def test_sparse_fit_params():
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))


def test_check_is_partition():
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))


def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense
    # input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
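

# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original test module): a
# minimal, self-contained example of the pattern the tests above exercise --
# wrapping a custom metric with make_scorer and handing it to cross_val_score.
# It assumes a scikit-learn version where these utilities live in
# sklearn.model_selection and sklearn.metrics; the older
# sklearn.cross_validation module exercised above exposes an equivalent
# cross_val_score.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.metrics import make_scorer
    from sklearn.model_selection import cross_val_score
    from sklearn.svm import SVC

    def fraction_correct(y_true, y_pred):
        # Toy metric: fraction of exactly matching labels, in the spirit of
        # the custom scorers defined in the tests above.
        return float((y_true == y_pred).sum()) / len(y_true)

    iris = load_iris()
    clf = SVC(kernel='linear')
    scores = cross_val_score(
        clf, iris.data, iris.target,
        scoring=make_scorer(fraction_correct), cv=5)
    # One score per fold, each computed with the custom metric.
    print(scores)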
bsd-3-clause
robertwb/incubator-beam
sdks/python/apache_beam/dataframe/frames_test.py
3
78735
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pandas as pd from parameterized import parameterized import apache_beam as beam from apache_beam.dataframe import expressions from apache_beam.dataframe import frame_base from apache_beam.dataframe import frames PD_VERSION = tuple(map(int, pd.__version__.split('.'))) GROUPBY_DF = pd.DataFrame({ 'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)], 'foo': [None if i % 11 == 0 else i for i in range(100)], 'bar': [None if i % 7 == 0 else 99 - i for i in range(100)], 'baz': [None if i % 13 == 0 else i * 2 for i in range(100)], 'bool': [i % 17 == 0 for i in range(100)], 'str': [str(i) for i in range(100)], }) def _get_deferred_args(*args): return [ frame_base.DeferredFrame.wrap( expressions.ConstantExpression(arg, arg[0:0])) for arg in args ] class _AbstractFrameTest(unittest.TestCase): """Test sub-class with utilities for verifying DataFrame operations.""" def _run_error_test( self, func, *args, construction_time=True, distributed=True): """Verify that func(*args) raises the same exception in pandas and in Beam. Note that by default this only checks for exceptions that the Beam DataFrame API raises during expression generation (i.e. construction time). Exceptions raised while the pipeline is executing are less helpful, but are sometimes unavoidable (e.g. data validation exceptions), to check for these exceptions use construction_time=False.""" deferred_args = _get_deferred_args(*args) # Get expected error try: expected = func(*args) except Exception as e: expected_error = e else: raise AssertionError( "Expected an error, but executing with pandas successfully " f"returned:\n{expected}") # Get actual error if construction_time: try: _ = func(*deferred_args)._expr except Exception as e: actual = e else: raise AssertionError( f"Expected an error:\n{expected_error}\nbut Beam successfully " f"generated an expression.") else: # not construction_time # Check for an error raised during pipeline execution expr = func(*deferred_args)._expr session_type = ( expressions.PartitioningSession if distributed else expressions.Session) try: result = session_type({}).evaluate(expr) except Exception as e: actual = e else: raise AssertionError( f"Expected an error:\n{expected_error}\nbut Beam successfully " f"Computed the result:\n{result}.") # Verify if (not isinstance(actual, type(expected_error)) or not str(actual) == str(expected_error)): raise AssertionError( f'Expected {expected_error!r} to be raised, but got {actual!r}' ) from actual def _run_inplace_test(self, func, arg, **kwargs): """Verify an inplace operation performed by func. 
Checks that func performs the same inplace operation on arg, in pandas and in Beam.""" def wrapper(df): df = df.copy() func(df) return df self._run_test(wrapper, arg, **kwargs) def _run_test( self, func, *args, distributed=True, nonparallel=False, check_proxy=True): """Verify that func(*args) produces the same result in pandas and in Beam. Args: distributed (bool): Whether or not to use PartitioningSession to simulate parallel execution. nonparallel (bool): Whether or not this function contains a non-parallelizable operation. If True, the expression will be generated twice, once outside of an allow_non_parallel_operations block (to verify NonParallelOperation is raised), and again inside of an allow_non_parallel_operations block to actually generate an expression to verify. check_proxy (bool): Whether or not to check that the proxy of the generated expression matches the actual result, defaults to True. This option should NOT be set to False in tests added for new operations if at all possible. Instead make sure the new operation produces the correct proxy. This flag only exists as an escape hatch until existing failures can be addressed (BEAM-12379).""" # Compute expected value expected = func(*args) # Compute actual value deferred_args = _get_deferred_args(*args) if nonparallel: # First run outside a nonparallel block to confirm this raises as expected with self.assertRaises(expressions.NonParallelOperation) as raised: func(*deferred_args) if raised.exception.msg.startswith( "Encountered non-parallelizable form of"): raise AssertionError( "Default NonParallelOperation raised, please specify a reason in " "the Singleton() partitioning requirement for this operation." ) from raised.exception # Re-run in an allow non parallel block to get an expression to verify with beam.dataframe.allow_non_parallel_operations(): expr = func(*deferred_args)._expr else: expr = func(*deferred_args)._expr # Compute the result of the generated expression session_type = ( expressions.PartitioningSession if distributed else expressions.Session) actual = session_type({}).evaluate(expr) # Verify if isinstance(expected, pd.core.generic.NDFrame): if distributed: if expected.index.is_unique: expected = expected.sort_index() actual = actual.sort_index() else: expected = expected.sort_values(list(expected.columns)) actual = actual.sort_values(list(actual.columns)) if isinstance(expected, pd.Series): pd.testing.assert_series_equal(expected, actual) elif isinstance(expected, pd.DataFrame): pd.testing.assert_frame_equal(expected, actual) else: raise ValueError( f"Expected value is a {type(expected)}," "not a Series or DataFrame.") else: # Expectation is not a pandas object if isinstance(expected, float): if np.isnan(expected): cmp = np.isnan else: cmp = lambda x: np.isclose(expected, x) else: cmp = expected.__eq__ self.assertTrue( cmp(actual), 'Expected:\n\n%r\n\nActual:\n\n%r' % (expected, actual)) if check_proxy: # Verify that the actual result agrees with the proxy proxy = expr.proxy() if type(actual) in (np.float32, np.float64): self.assertTrue(type(actual) == type(proxy) or np.isnan(proxy)) else: self.assertEqual(type(actual), type(proxy)) if isinstance(expected, pd.core.generic.NDFrame): if isinstance(expected, pd.Series): self.assertEqual(actual.dtype, proxy.dtype) self.assertEqual(actual.name, proxy.name) elif isinstance(expected, pd.DataFrame): pd.testing.assert_series_equal(actual.dtypes, proxy.dtypes) else: raise ValueError( f"Expected value is a {type(expected)}," "not a Series or DataFrame.") 
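        # The remaining proxy checks compare the index metadata of the
        # evaluated result against the proxy: the index level names and the
        # per-level dtypes must agree, so that stages that only see the proxy
        # get accurate schema information.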
        self.assertEqual(actual.index.names, proxy.index.names)

        for i in range(actual.index.nlevels):
          self.assertEqual(
              actual.index.get_level_values(i).dtype,
              proxy.index.get_level_values(i).dtype)


class DeferredFrameTest(_AbstractFrameTest):
  """Miscellaneous tests for DataFrame operations."""
  def test_series_arithmetic(self):
    a = pd.Series([1, 2, 3])
    b = pd.Series([100, 200, 300])
    self._run_test(lambda a, b: a - 2 * b, a, b)

  def test_get_column(self):
    df = pd.DataFrame({
        'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
        'Speed': [380., 370., 24., 26.]
    })
    self._run_test(lambda df: df['Animal'], df)
    self._run_test(lambda df: df.Speed, df)
    self._run_test(lambda df: df.get('Animal'), df)
    self._run_test(lambda df: df.get('FOO', df.Animal), df)

  def test_series_xs(self):
    # pandas doctests only verify DataFrame.xs, here we verify Series.xs as
    # well
    d = {
        'num_legs': [4, 4, 2, 2],
        'num_wings': [0, 0, 2, 2],
        'class': ['mammal', 'mammal', 'mammal', 'bird'],
        'animal': ['cat', 'dog', 'bat', 'penguin'],
        'locomotion': ['walks', 'walks', 'flies', 'walks']
    }
    df = pd.DataFrame(data=d)
    df = df.set_index(['class', 'animal', 'locomotion'])

    self._run_test(lambda df: df.num_legs.xs('mammal'), df)
    self._run_test(lambda df: df.num_legs.xs(('mammal', 'dog')), df)
    self._run_test(lambda df: df.num_legs.xs('cat', level=1), df)
    self._run_test(
        lambda df: df.num_legs.xs(('bird', 'walks'), level=[0, 'locomotion']),
        df)

  def test_set_column(self):
    def new_column(df):
      df['NewCol'] = df['Speed']

    df = pd.DataFrame({
        'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
        'Speed': [380., 370., 24., 26.]
    })
    self._run_inplace_test(new_column, df)

  def test_set_column_from_index(self):
    def new_column(df):
      df['NewCol'] = df.index

    df = pd.DataFrame({
        'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
        'Speed': [380., 370., 24., 26.]
}) self._run_inplace_test(new_column, df) def test_tz_localize_ambiguous_series(self): # This replicates a tz_localize doctest: # s.tz_localize('CET', ambiguous=np.array([True, True, False])) # But using a DeferredSeries instead of a np array s = pd.Series( range(3), index=pd.DatetimeIndex([ '2018-10-28 01:20:00', '2018-10-28 02:36:00', '2018-10-28 03:46:00' ])) ambiguous = pd.Series([True, True, False], index=s.index) self._run_test( lambda s, ambiguous: s.tz_localize('CET', ambiguous=ambiguous), s, ambiguous) def test_tz_convert(self): # This replicates a tz_localize doctest: # s.tz_localize('CET', ambiguous=np.array([True, True, False])) # But using a DeferredSeries instead of a np array s = pd.Series( range(3), index=pd.DatetimeIndex([ '2018-10-27 01:20:00', '2018-10-27 02:36:00', '2018-10-27 03:46:00' ], tz='Europe/Berlin')) self._run_test(lambda s: s.tz_convert('America/Los_Angeles'), s) def test_sort_index_columns(self): df = pd.DataFrame({ 'c': range(10), 'a': range(10), 'b': range(10), np.nan: range(10), }) self._run_test(lambda df: df.sort_index(axis=1), df) self._run_test(lambda df: df.sort_index(axis=1, ascending=False), df) self._run_test(lambda df: df.sort_index(axis=1, na_position='first'), df) def test_where_callable_args(self): df = pd.DataFrame( np.arange(10, dtype=np.int64).reshape(-1, 2), columns=['A', 'B']) self._run_test( lambda df: df.where(lambda df: df % 2 == 0, lambda df: df * 10), df) def test_where_concrete_args(self): df = pd.DataFrame( np.arange(10, dtype=np.int64).reshape(-1, 2), columns=['A', 'B']) self._run_test( lambda df: df.where( df % 2 == 0, pd.Series({ 'A': 123, 'B': 456 }), axis=1), df) def test_combine_dataframe(self): df = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 self._run_test( lambda df, df2: df.combine(df2, take_smaller), df, df2, nonparallel=True) def test_combine_dataframe_fill(self): df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 self._run_test( lambda df1, df2: df1.combine(df2, take_smaller, fill_value=-5), df1, df2, nonparallel=True) def test_combine_Series(self): with expressions.allow_non_parallel_operations(): s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) self._run_test(lambda s1, s2: s1.combine(s2, max), s1, s2) def test_combine_first_dataframe(self): df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) self._run_test(lambda df1, df2: df1.combine_first(df2), df1, df2) def test_combine_first_series(self): s1 = pd.Series([1, np.nan]) s2 = pd.Series([3, 4]) self._run_test(lambda s1, s2: s1.combine_first(s2), s1, s2) def test_add_prefix(self): df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) s = pd.Series([1, 2, 3, 4]) self._run_test(lambda df: df.add_prefix('col_'), df) self._run_test(lambda s: s.add_prefix('col_'), s) def test_add_suffix(self): df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) s = pd.Series([1, 2, 3, 4]) self._run_test(lambda df: df.add_suffix('_col'), df) self._run_test(lambda s: s.add_prefix('_col'), s) def test_set_index(self): df = pd.DataFrame({ # [19, 18, ..] 'index1': reversed(range(20)), # [15, 16, .., 0, 1, .., 13, 14] 'index2': np.roll(range(20), 5), # ['', 'a', 'bb', ...] 
'values': [chr(ord('a') + i) * i for i in range(20)], }) self._run_test(lambda df: df.set_index(['index1', 'index2']), df) self._run_test(lambda df: df.set_index(['index1', 'index2'], drop=True), df) self._run_test(lambda df: df.set_index('values'), df) self._run_error_test(lambda df: df.set_index('bad'), df) self._run_error_test( lambda df: df.set_index(['index2', 'bad', 'really_bad']), df) def test_series_drop_ignore_errors(self): midx = pd.MultiIndex( levels=[['lama', 'cow', 'falcon'], ['speed', 'weight', 'length']], codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]]) s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) # drop() requires singleton partitioning unless errors are ignored # Add some additional tests here to make sure the implementation works in # non-singleton partitioning. self._run_test(lambda s: s.drop('lama', level=0, errors='ignore'), s) self._run_test(lambda s: s.drop(('cow', 'speed'), errors='ignore'), s) self._run_test(lambda s: s.drop('falcon', level=0, errors='ignore'), s) def test_dataframe_drop_ignore_errors(self): midx = pd.MultiIndex( levels=[['lama', 'cow', 'falcon'], ['speed', 'weight', 'length']], codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]]) df = pd.DataFrame( index=midx, columns=['big', 'small'], data=[[45, 30], [200, 100], [1.5, 1], [30, 20], [250, 150], [1.5, 0.8], [320, 250], [1, 0.8], [0.3, 0.2]]) # drop() requires singleton partitioning unless errors are ignored # Add some additional tests here to make sure the implementation works in # non-singleton partitioning. self._run_test( lambda df: df.drop(index='lama', level=0, errors='ignore'), df) self._run_test( lambda df: df.drop(index=('cow', 'speed'), errors='ignore'), df) self._run_test( lambda df: df.drop(index='falcon', level=0, errors='ignore'), df) self._run_test( lambda df: df.drop(index='cow', columns='small', errors='ignore'), df) def test_merge(self): # This is from the pandas doctests, but fails due to re-indexing being # order-sensitive. df1 = pd.DataFrame({ 'lkey': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5] }) df2 = pd.DataFrame({ 'rkey': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8] }) self._run_test( lambda df1, df2: df1.merge(df2, left_on='lkey', right_on='rkey').rename( index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) self._run_test( lambda df1, df2: df1.merge( df2, left_on='lkey', right_on='rkey', suffixes=('_left', '_right')). rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) def test_merge_left_join(self): # This is from the pandas doctests, but fails due to re-indexing being # order-sensitive. df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) self._run_test( lambda df1, df2: df1.merge(df2, how='left', on='a').rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) def test_merge_on_index(self): # This is from the pandas doctests, but fails due to re-indexing being # order-sensitive. 
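    # Unlike the column-key merges above, this merge joins on the index
    # (left_index=True / right_index=True), and the test below runs without
    # nonparallel=True -- presumably because an index-keyed join can be
    # evaluated under index partitioning rather than in a single partition.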
df1 = pd.DataFrame({ 'lkey': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5] }).set_index('lkey') df2 = pd.DataFrame({ 'rkey': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8] }).set_index('rkey') self._run_test( lambda df1, df2: df1.merge(df2, left_index=True, right_index=True), df1, df2, check_proxy=False) def test_merge_same_key(self): df1 = pd.DataFrame({ 'key': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5] }) df2 = pd.DataFrame({ 'key': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8] }) self._run_test( lambda df1, df2: df1.merge(df2, on='key').rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) self._run_test( lambda df1, df2: df1.merge(df2, on='key', suffixes=('_left', '_right')).rename( index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) def test_merge_same_key_doctest(self): df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) self._run_test( lambda df1, df2: df1.merge(df2, how='left', on='a').rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) # Test without specifying 'on' self._run_test( lambda df1, df2: df1.merge(df2, how='left').rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) def test_merge_same_key_suffix_collision(self): df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2], 'a_lsuffix': [5, 6]}) df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4], 'a_rsuffix': [7, 8]}) self._run_test( lambda df1, df2: df1.merge( df2, how='left', on='a', suffixes=('_lsuffix', '_rsuffix')).rename( index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) # Test without specifying 'on' self._run_test( lambda df1, df2: df1.merge(df2, how='left', suffixes=('_lsuffix', '_rsuffix')). rename(index=lambda x: '*'), df1, df2, nonparallel=True, check_proxy=False) def test_value_counts_with_nans(self): # similar to doctests that verify value_counts, but include nan values to # make sure we handle them correctly. 
df = pd.DataFrame({ 'num_legs': [2, 4, 4, 6, np.nan, np.nan], 'num_wings': [2, 0, 0, 0, np.nan, 2] }, index=['falcon', 'dog', 'cat', 'ant', 'car', 'plane']) self._run_test(lambda df: df.value_counts(), df) self._run_test(lambda df: df.value_counts(normalize=True), df) self._run_test(lambda df: df.num_wings.value_counts(), df) self._run_test(lambda df: df.num_wings.value_counts(normalize=True), df) def test_value_counts_does_not_support_sort(self): df = pd.DataFrame({ 'num_legs': [2, 4, 4, 6, np.nan, np.nan], 'num_wings': [2, 0, 0, 0, np.nan, 2] }, index=['falcon', 'dog', 'cat', 'ant', 'car', 'plane']) with self.assertRaisesRegex(frame_base.WontImplementError, r"value_counts\(sort\=True\)"): self._run_test(lambda df: df.value_counts(sort=True), df) with self.assertRaisesRegex(frame_base.WontImplementError, r"value_counts\(sort\=True\)"): self._run_test(lambda df: df.num_wings.value_counts(sort=True), df) def test_series_getitem(self): s = pd.Series([x**2 for x in range(10)]) self._run_test(lambda s: s[...], s) self._run_test(lambda s: s[:], s) self._run_test(lambda s: s[s < 10], s) self._run_test(lambda s: s[lambda s: s < 10], s) s.index = s.index.map(float) self._run_test(lambda s: s[1.5:6], s) @parameterized.expand([ (pd.Series(range(10)), ), # unique (pd.Series(list(range(100)) + [0]), ), # non-unique int (pd.Series(list(range(100)) + [0]) / 100, ), # non-unique flt (pd.Series(['a', 'b', 'c', 'd']), ), # unique str (pd.Series(['a', 'b', 'a', 'c', 'd']), ), # non-unique str ]) def test_series_is_unique(self, series): self._run_test(lambda s: s.is_unique, series) def test_dataframe_getitem(self): df = pd.DataFrame({'A': [x**2 for x in range(6)], 'B': list('abcdef')}) self._run_test(lambda df: df['A'], df) self._run_test(lambda df: df[['A', 'B']], df) self._run_test(lambda df: df[:], df) self._run_test(lambda df: df[df.A < 10], df) df.index = df.index.map(float) self._run_test(lambda df: df[1.5:4], df) def test_loc(self): dates = pd.date_range('1/1/2000', periods=8) # TODO(BEAM-11757): We do not preserve the freq attribute on a DateTime # index dates.freq = None df = pd.DataFrame( np.arange(32).reshape((8, 4)), index=dates, columns=['A', 'B', 'C', 'D']) self._run_test(lambda df: df.loc[:], df) self._run_test(lambda df: df.loc[:, 'A'], df) self._run_test(lambda df: df.loc[:dates[3]], df) self._run_test(lambda df: df.loc[df.A > 10], df) self._run_test(lambda df: df.loc[lambda df: df.A > 10], df) self._run_test(lambda df: df.C.loc[df.A > 10], df) self._run_test(lambda df, s: df.loc[s.loc[1:3]], df, pd.Series(dates)) def test_append_sort(self): # yapf: disable df1 = pd.DataFrame({'int': [1, 2, 3], 'str': ['a', 'b', 'c']}, columns=['int', 'str'], index=[1, 3, 5]) df2 = pd.DataFrame({'int': [4, 5, 6], 'str': ['d', 'e', 'f']}, columns=['str', 'int'], index=[2, 4, 6]) # yapf: enable self._run_test(lambda df1, df2: df1.append(df2, sort=True), df1, df2) self._run_test(lambda df1, df2: df1.append(df2, sort=False), df1, df2) self._run_test(lambda df1, df2: df2.append(df1, sort=True), df1, df2) self._run_test(lambda df1, df2: df2.append(df1, sort=False), df1, df2) def test_smallest_largest(self): df = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [2, 3, 5, 7]}) self._run_test(lambda df: df.nlargest(1, 'A', keep='all'), df) self._run_test(lambda df: df.nsmallest(3, 'A', keep='all'), df) self._run_test(lambda df: df.nlargest(3, ['A', 'B'], keep='all'), df) def test_series_cov_corr(self): for s in [pd.Series([1, 2, 3]), pd.Series(range(100)), pd.Series([x**3 for x in range(-50, 50)])]: self._run_test(lambda s: 
s.std(), s) self._run_test(lambda s: s.var(), s) self._run_test(lambda s: s.corr(s), s) self._run_test(lambda s: s.corr(s + 1), s) self._run_test(lambda s: s.corr(s * s), s) self._run_test(lambda s: s.cov(s * s), s) def test_dataframe_cov_corr(self): df = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c']) df.loc[df.index[:5], 'a'] = np.nan df.loc[df.index[5:10], 'b'] = np.nan self._run_test(lambda df: df.corr(), df) self._run_test(lambda df: df.cov(), df) self._run_test(lambda df: df.corr(min_periods=12), df) self._run_test(lambda df: df.cov(min_periods=12), df) self._run_test(lambda df: df.corrwith(df.a), df) self._run_test(lambda df: df[['a', 'b']].corrwith(df[['b', 'c']]), df) df2 = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c']) self._run_test( lambda df, df2: df.corrwith(df2, axis=1), df, df2, check_proxy=False) def test_corrwith_bad_axis(self): df = pd.DataFrame({'a': range(3), 'b': range(3, 6), 'c': range(6, 9)}) self._run_error_test(lambda df: df.corrwith(df.a, axis=2), df) self._run_error_test(lambda df: df.corrwith(df, axis=5), df) @unittest.skipIf(PD_VERSION < (1, 2), "na_action added in pandas 1.2.0") def test_applymap_na_action(self): # Replicates a doctest for na_action which is incompatible with # doctest framework df = pd.DataFrame([[pd.NA, 2.12], [3.356, 4.567]]) self._run_test( lambda df: df.applymap(lambda x: len(str(x)), na_action='ignore'), df, # TODO: generate proxy using naive type inference on fn check_proxy=False) def test_dataframe_eval_query(self): df = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c']) self._run_test(lambda df: df.eval('foo = a + b - c'), df) self._run_test(lambda df: df.query('a > b + c'), df) self._run_inplace_test(lambda df: df.eval('foo = a + b - c'), df) # Verify that attempting to access locals raises a useful error deferred_df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(df, df[0:0])) self.assertRaises( NotImplementedError, lambda: deferred_df.eval('foo = a + @b - c')) self.assertRaises( NotImplementedError, lambda: deferred_df.query('a > @b + c')) def test_index_name_assignment(self): df = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) df = df.set_index(['a', 'b'], drop=False) def change_index_names(df): df.index.names = ['A', None] self._run_inplace_test(change_index_names, df) def test_quantile(self): df = pd.DataFrame( np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=['a', 'b']) self._run_test( lambda df: df.quantile(0.1, axis='columns'), df, check_proxy=False) self._run_test( lambda df: df.quantile(0.1, axis='columns'), df, check_proxy=False) with self.assertRaisesRegex(frame_base.WontImplementError, r"df\.quantile\(q=0\.1, axis='columns'\)"): self._run_test(lambda df: df.quantile([0.1, 0.5], axis='columns'), df) def test_dataframe_melt(self): df = pd.DataFrame({ 'A': { 0: 'a', 1: 'b', 2: 'c' }, 'B': { 0: 1, 1: 3, 2: 5 }, 'C': { 0: 2, 1: 4, 2: 6 } }) self._run_test( lambda df: df.melt(id_vars=['A'], value_vars=['B'], ignore_index=False), df) self._run_test( lambda df: df.melt( id_vars=['A'], value_vars=['B', 'C'], ignore_index=False), df) self._run_test( lambda df: df.melt( id_vars=['A'], value_vars=['B'], var_name='myVarname', value_name='myValname', ignore_index=False), df) self._run_test( lambda df: df.melt( id_vars=['A'], value_vars=['B', 'C'], ignore_index=False), df) df.columns = [list('ABC'), list('DEF')] self._run_test( lambda df: df.melt( col_level=0, id_vars=['A'], value_vars=['B'], ignore_index=False), df) self._run_test( lambda df: df.melt( id_vars=[('A', 
'D')], value_vars=[('B', 'E')], ignore_index=False), df) def test_fillna_columns(self): df = pd.DataFrame( [[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5], [np.nan, 3, np.nan, 4], [3, np.nan, np.nan, 4]], columns=list('ABCD')) self._run_test(lambda df: df.fillna(method='ffill', axis='columns'), df) self._run_test( lambda df: df.fillna(method='ffill', axis='columns', limit=1), df) self._run_test( lambda df: df.fillna(method='bfill', axis='columns', limit=1), df) # Intended behavior is unclear here. See # https://github.com/pandas-dev/pandas/issues/40989 # self._run_test(lambda df: df.fillna(axis='columns', value=100, # limit=2), df) def test_dataframe_fillna_dataframe_as_value(self): df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5], [np.nan, 3, np.nan, 4]], columns=list("ABCD")) df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) self._run_test(lambda df, df2: df.fillna(df2), df, df2) def test_dataframe_fillna_series_as_value(self): df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5], [np.nan, 3, np.nan, 4]], columns=list("ABCD")) s = pd.Series(range(4), index=list("ABCE")) self._run_test(lambda df, s: df.fillna(s), df, s) def test_series_fillna_series_as_value(self): df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5], [np.nan, 3, np.nan, 4]], columns=list("ABCD")) df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) self._run_test(lambda df, df2: df.A.fillna(df2.A), df, df2) def test_append_verify_integrity(self): df1 = pd.DataFrame({'A': range(10), 'B': range(10)}, index=range(10)) df2 = pd.DataFrame({'A': range(10), 'B': range(10)}, index=range(9, 19)) self._run_error_test( lambda s1, s2: s1.append(s2, verify_integrity=True), df1['A'], df2['A'], construction_time=False) self._run_error_test( lambda df1, df2: df1.append(df2, verify_integrity=True), df1, df2, construction_time=False) def test_categorical_groupby(self): df = pd.DataFrame({'A': np.arange(6), 'B': list('aabbca')}) df['B'] = df['B'].astype(pd.CategoricalDtype(list('cab'))) df = df.set_index('B') # TODO(BEAM-11190): These aggregations can be done in index partitions, but # it will require a little more complex logic self._run_test(lambda df: df.groupby(level=0).sum(), df, nonparallel=True) self._run_test(lambda df: df.groupby(level=0).mean(), df, nonparallel=True) def test_dataframe_sum_nonnumeric_raises(self): # Attempting a numeric aggregation with the str column present should # raise, and suggest the numeric_only argument with self.assertRaisesRegex(frame_base.WontImplementError, 'numeric_only'): self._run_test(lambda df: df.sum(), GROUPBY_DF) # numeric_only=True should work self._run_test(lambda df: df.sum(numeric_only=True), GROUPBY_DF) # projecting only numeric columns should too self._run_test(lambda df: df[['foo', 'bar']].sum(), GROUPBY_DF) def test_insert(self): df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) self._run_inplace_test(lambda df: df.insert(1, 'C', df.A * 2), df) self._run_inplace_test( lambda df: df.insert(0, 'foo', pd.Series([8], index=[1])), df, check_proxy=False) self._run_inplace_test(lambda df: df.insert(2, 'bar', value='q'), df) def test_insert_does_not_support_list_value(self): df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) with self.assertRaisesRegex(frame_base.WontImplementError, r"insert\(value=list\)"): self._run_inplace_test(lambda df: df.insert(1, 'C', [7, 8, 9]), df) def test_drop_duplicates(self): df = pd.DataFrame({ 'brand': 
['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) self._run_test(lambda df: df.drop_duplicates(keep=False), df) self._run_test( lambda df: df.drop_duplicates(subset=['brand'], keep=False), df) self._run_test( lambda df: df.drop_duplicates(subset=['brand', 'style'], keep=False), df) @parameterized.expand([ ( lambda base: base.from_dict({ 'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd'] }), ), ( lambda base: base.from_dict({ 'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd'] }, orient='index'), ), ( lambda base: base.from_records( np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], dtype=[('col_1', 'i4'), ('col_2', 'U1')])), ), ]) def test_create_methods(self, func): expected = func(pd.DataFrame) deferred_df = func(frames.DeferredDataFrame) actual = expressions.Session({}).evaluate(deferred_df._expr) pd.testing.assert_frame_equal(actual, expected) def test_replace(self): # verify a replace() doctest case that doesn't quite work in Beam as it uses # the default method='pad' df = pd.DataFrame({'A': ['bat', 'foo', 'bait'], 'B': ['abc', 'bar', 'xyz']}) self._run_test( lambda df: df.replace( regex={ r'^ba.$': 'new', 'foo': 'xyz' }, method=None), df) def test_sample_columns(self): df = pd.DataFrame({ 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) self._run_test(lambda df: df.sample(axis=1, n=2, random_state=1), df) self._run_error_test(lambda df: df.sample(axis=1, n=10, random_state=2), df) self._run_test( lambda df: df.sample(axis=1, n=10, random_state=3, replace=True), df) def test_cat(self): # Replicate the doctests from CategorigcalAccessor # These tests don't translate into pandas_doctests_test.py because it # tries to use astype("category") in Beam, which makes a non-deferred # column type. 
s = pd.Series(list("abbccc")).astype("category") self._run_test(lambda s: s.cat.rename_categories(list("cba")), s) self._run_test(lambda s: s.cat.reorder_categories(list("cba")), s) self._run_test(lambda s: s.cat.add_categories(["d", "e"]), s) self._run_test(lambda s: s.cat.remove_categories(["a", "c"]), s) self._run_test(lambda s: s.cat.set_categories(list("abcde")), s) self._run_test(lambda s: s.cat.as_ordered(), s) self._run_test(lambda s: s.cat.as_unordered(), s) self._run_test(lambda s: s.cat.codes, s) @parameterized.expand(frames.ELEMENTWISE_DATETIME_PROPERTIES) def test_dt_property(self, prop_name): # Generate a series with a lot of unique timestamps s = pd.Series( pd.date_range('1/1/2000', periods=100, freq='m') + pd.timedelta_range(start='0 days', end='70 days', periods=100)) self._run_test(lambda s: getattr(s.dt, prop_name), s) @parameterized.expand([ ('month_name', {}), ('day_name', {}), ('normalize', {}), ( 'strftime', { 'date_format': '%B %d, %Y, %r' }, ), ('tz_convert', { 'tz': 'Europe/Berlin' }), ]) def test_dt_method(self, op, kwargs): # Generate a series with a lot of unique timestamps s = pd.Series( pd.date_range( '1/1/2000', periods=100, freq='m', tz='America/Los_Angeles') + pd.timedelta_range(start='0 days', end='70 days', periods=100)) self._run_test(lambda s: getattr(s.dt, op)(**kwargs), s) def test_dt_tz_localize_ambiguous_series(self): # This replicates a dt.tz_localize doctest: # s.tz_localize('CET', ambiguous=np.array([True, True, False])) # But using a DeferredSeries instead of a np array s = pd.to_datetime( pd.Series([ '2018-10-28 01:20:00', '2018-10-28 02:36:00', '2018-10-28 03:46:00' ])) ambiguous = pd.Series([True, True, False], index=s.index) self._run_test( lambda s, ambiguous: s.dt.tz_localize('CET', ambiguous=ambiguous), s, ambiguous) def test_dt_tz_localize_nonexistent(self): # This replicates dt.tz_localize doctests that exercise `nonexistent`. # However they specify ambiguous='NaT' because the default, # ambiguous='infer', is not supported. 
s = pd.to_datetime( pd.Series(['2015-03-29 02:30:00', '2015-03-29 03:30:00'])) self._run_test( lambda s: s.dt.tz_localize( 'Europe/Warsaw', ambiguous='NaT', nonexistent='shift_forward'), s) self._run_test( lambda s: s.dt.tz_localize( 'Europe/Warsaw', ambiguous='NaT', nonexistent='shift_backward'), s) self._run_test( lambda s: s.dt.tz_localize( 'Europe/Warsaw', ambiguous='NaT', nonexistent=pd.Timedelta('1H')), s) class GroupByTest(_AbstractFrameTest): """Tests for DataFrame/Series GroupBy operations.""" @parameterized.expand(frames.ALL_AGGREGATIONS) def test_groupby_agg(self, agg_type): if agg_type == 'describe' and PD_VERSION < (1, 2): self.skipTest( "BEAM-12366: proxy generation of DataFrameGroupBy.describe " "fails in pandas < 1.2") self._run_test( lambda df: df.groupby('group').agg(agg_type), GROUPBY_DF, check_proxy=False) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_groupby_with_filter(self, agg_type): if agg_type == 'describe' and PD_VERSION < (1, 2): self.skipTest( "BEAM-12366: proxy generation of DataFrameGroupBy.describe " "fails in pandas < 1.2") self._run_test( lambda df: getattr(df[df.foo > 30].groupby('group'), agg_type)(), GROUPBY_DF, check_proxy=False) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_groupby(self, agg_type): if agg_type == 'describe' and PD_VERSION < (1, 2): self.skipTest( "BEAM-12366: proxy generation of DataFrameGroupBy.describe " "fails in pandas < 1.2") self._run_test( lambda df: getattr(df.groupby('group'), agg_type)(), GROUPBY_DF) @parameterized.expand(frames.ALL_AGGREGATIONS) @unittest.skip("Grouping by a series is not currently supported") def test_groupby_series(self, agg_type): self._run_test( lambda df: getattr(df[df.foo > 40].groupby(df.group), agg_type)(), GROUPBY_DF) def test_groupby_user_guide(self): # Example from https://pandas.pydata.org/docs/user_guide/groupby.html arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second']) df = pd.DataFrame({ 'A': [1, 1, 1, 1, 2, 2, 3, 3], 'B': np.arange(8) }, index=index) self._run_test(lambda df: df.groupby(['second', 'A']).sum(), df) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_groupby_project_series(self, agg_type): df = GROUPBY_DF if agg_type == 'describe': self.skipTest( "BEAM-12366: proxy generation of SeriesGroupBy.describe " "fails") if agg_type in ('corr', 'cov'): self.skipTest( "BEAM-12367: SeriesGroupBy.{corr, cov} do not raise the " "expected error.") self._run_test(lambda df: getattr(df.groupby('group').foo, agg_type)(), df) self._run_test(lambda df: getattr(df.groupby('group').bar, agg_type)(), df) self._run_test( lambda df: getattr(df.groupby('group')['foo'], agg_type)(), df) self._run_test( lambda df: getattr(df.groupby('group')['bar'], agg_type)(), df) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_groupby_project_dataframe(self, agg_type): if agg_type == 'describe' and PD_VERSION < (1, 2): self.skipTest( "BEAM-12366: proxy generation of DataFrameGroupBy.describe " "fails in pandas < 1.2") self._run_test( lambda df: getattr(df.groupby('group')[['bar', 'baz']], agg_type)(), GROUPBY_DF, check_proxy=False) def test_groupby_errors_bad_projection(self): df = GROUPBY_DF # non-existent projection column self._run_error_test( lambda df: df.groupby('group')[['bar', 'baz']].bar.median(), df) self._run_error_test(lambda df: df.groupby('group')[['bad']].median(), df) self._run_error_test(lambda df: 
df.groupby('group').bad.median(), df) self._run_error_test( lambda df: df.groupby('group')[['bar', 'baz']].bar.sum(), df) self._run_error_test(lambda df: df.groupby('group')[['bat']].sum(), df) self._run_error_test(lambda df: df.groupby('group').bat.sum(), df) def test_groupby_errors_non_existent_label(self): df = GROUPBY_DF # non-existent grouping label self._run_error_test( lambda df: df.groupby(['really_bad', 'foo', 'bad']).foo.sum(), df) self._run_error_test(lambda df: df.groupby('bad').foo.sum(), df) def test_groupby_callable(self): df = GROUPBY_DF self._run_test(lambda df: df.groupby(lambda x: x % 2).foo.sum(), df) self._run_test(lambda df: df.groupby(lambda x: x % 5).median(), df) def test_groupby_apply(self): df = GROUPBY_DF def median_sum_fn(x): return (x.foo + x.bar).median() # Note this is the same as DataFrameGroupBy.describe. Using it here is # just a convenient way to test apply() with a user fn that returns a Series describe = lambda df: df.describe() self._run_test(lambda df: df.groupby('group').foo.apply(describe), df) self._run_test( lambda df: df.groupby('group')[['foo', 'bar']].apply(describe), df) self._run_test(lambda df: df.groupby('group').apply(median_sum_fn), df) self._run_test( lambda df: df.set_index('group').foo.groupby(level=0).apply(describe), df) self._run_test(lambda df: df.groupby(level=0).apply(median_sum_fn), df) self._run_test(lambda df: df.groupby(lambda x: x % 3).apply(describe), df) self._run_test( lambda df: df.bar.groupby(lambda x: x % 3).apply(describe), df) self._run_test( lambda df: df.set_index(['str', 'group', 'bool']).groupby( level='group').apply(median_sum_fn), df) def test_groupby_apply_preserves_column_order(self): df = GROUPBY_DF self._run_test( lambda df: df[['foo', 'group', 'bar']].groupby('group').apply( lambda x: x), df) def test_groupby_transform(self): df = pd.DataFrame({ "Date": [ "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05", "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05" ], "Data": [5, 8, 6, 1, 50, 100, 60, 120], }) self._run_test(lambda df: df.groupby('Date')['Data'].transform(np.sum), df) self._run_test( lambda df: df.groupby('Date')['Data'].transform( lambda x: (x - x.mean()) / x.std()), df) def test_groupby_apply_modified_index(self): df = GROUPBY_DF # If apply fn modifies the index then the output will include the grouped # index self._run_test( lambda df: df.groupby('group').apply( lambda x: x[x.foo > x.foo.median()]), df) @unittest.skip('BEAM-11710') def test_groupby_aggregate_grouped_column(self): df = pd.DataFrame({ 'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)], 'foo': [None if i % 11 == 0 else i for i in range(100)], 'bar': [None if i % 7 == 0 else 99 - i for i in range(100)], 'baz': [None if i % 13 == 0 else i * 2 for i in range(100)], }) self._run_test(lambda df: df.groupby('group').group.count(), df) self._run_test(lambda df: df.groupby('group')[['group', 'bar']].count(), df) self._run_test( lambda df: df.groupby('group')[['group', 'bar']].apply( lambda x: x.describe()), df) @parameterized.expand((x, ) for x in [ 0, [1], 3, [0, 3], [2, 1], ['foo', 0], [1, 'str'], [3, 0, 2, 1], ]) def test_groupby_level_agg(self, level): df = GROUPBY_DF.set_index(['group', 'foo', 'bar', 'str'], drop=False) self._run_test(lambda df: df.groupby(level=level).bar.max(), df) self._run_test( lambda df: df.groupby(level=level).sum(numeric_only=True), df) self._run_test( lambda df: df.groupby(level=level).apply( lambda x: (x.foo + x.bar).median()), df) @unittest.skipIf(PD_VERSION < (1, 1), "drop_na 
added in pandas 1.1.0") def test_groupby_count_na(self): # Verify we can do a groupby.count() that doesn't drop NaN values self._run_test( lambda df: df.groupby('foo', dropna=True).bar.count(), GROUPBY_DF) self._run_test( lambda df: df.groupby('foo', dropna=False).bar.count(), GROUPBY_DF) def test_groupby_sum_min_count(self): df = pd.DataFrame({ 'good': [1, 2, 3, np.nan], 'bad': [np.nan, np.nan, np.nan, 4], 'group': ['a', 'b', 'a', 'b'] }) self._run_test(lambda df: df.groupby('group').sum(min_count=2), df) def test_groupby_dtypes(self): self._run_test( lambda df: df.groupby('group').dtypes, GROUPBY_DF, check_proxy=False) self._run_test( lambda df: df.groupby(level=0).dtypes, GROUPBY_DF, check_proxy=False) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_dataframe_groupby_series(self, agg_type): if agg_type == 'describe' and PD_VERSION < (1, 2): self.skipTest( "BEAM-12366: proxy generation of DataFrameGroupBy.describe " "fails in pandas < 1.2") self._run_test( lambda df: df[df.foo > 40].groupby(df.group).agg(agg_type), GROUPBY_DF, check_proxy=False) self._run_test( lambda df: df[df.foo > 40].groupby(df.foo % 3).agg(agg_type), GROUPBY_DF, check_proxy=False) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_series_groupby_series(self, agg_type): if agg_type == 'describe': self.skipTest( "BEAM-12366: proxy generation of SeriesGroupBy.describe " "fails") if agg_type in ('corr', 'cov'): self.skipTest( "BEAM-12367: SeriesGroupBy.{corr, cov} do not raise the " "expected error.") self._run_test( lambda df: df[df.foo < 40].bar.groupby(df.group).agg(agg_type), GROUPBY_DF) self._run_test( lambda df: df[df.foo < 40].bar.groupby(df.foo % 3).agg(agg_type), GROUPBY_DF) def test_groupby_series_apply(self): df = GROUPBY_DF def median_sum_fn(x): return (x.foo + x.bar).median() # Note this is the same as DataFrameGroupBy.describe. 
Using it here is # just a convenient way to test apply() with a user fn that returns a Series describe = lambda df: df.describe() self._run_test(lambda df: df.groupby(df.group).foo.apply(describe), df) self._run_test( lambda df: df.groupby(df.group)[['foo', 'bar']].apply(describe), df) self._run_test(lambda df: df.groupby(df.group).apply(median_sum_fn), df) def test_groupby_multiindex_keep_nans(self): # Due to https://github.com/pandas-dev/pandas/issues/36470 # groupby(dropna=False) doesn't work with multiple columns with self.assertRaisesRegex(NotImplementedError, "BEAM-12495"): self._run_test( lambda df: df.groupby(['foo', 'bar'], dropna=False).sum(), GROUPBY_DF) class AggregationTest(_AbstractFrameTest): """Tests for global aggregation methods on DataFrame/Series.""" # corr, cov on Series require an other argument @parameterized.expand( sorted(set(frames.ALL_AGGREGATIONS) - set(['corr', 'cov']))) def test_series_agg(self, agg_method): s = pd.Series(list(range(16))) nonparallel = agg_method in ('quantile', 'mean', 'describe', 'median') # TODO(BEAM-12379): max and min produce the wrong proxy check_proxy = agg_method not in ('max', 'min') self._run_test( lambda s: s.agg(agg_method), s, nonparallel=nonparallel, check_proxy=check_proxy) @parameterized.expand(frames.ALL_AGGREGATIONS) def test_dataframe_agg(self, agg_method): df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, 3, 5, 7]}) nonparallel = agg_method in ('quantile', 'mean', 'describe', 'median') # TODO(BEAM-12379): max and min produce the wrong proxy check_proxy = agg_method not in ('max', 'min') self._run_test( lambda df: df.agg(agg_method), df, nonparallel=nonparallel, check_proxy=check_proxy) def test_series_agg_modes(self): s = pd.Series(list(range(16))) self._run_test(lambda s: s.agg('sum'), s) self._run_test(lambda s: s.agg(['sum']), s) self._run_test(lambda s: s.agg(['sum', 'mean']), s, nonparallel=True) self._run_test(lambda s: s.agg(['mean']), s, nonparallel=True) self._run_test(lambda s: s.agg('mean'), s, nonparallel=True) def test_dataframe_agg_modes(self): df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, 3, 5, 7]}) self._run_test(lambda df: df.agg('sum'), df) self._run_test(lambda df: df.agg(['sum', 'mean']), df, nonparallel=True) self._run_test(lambda df: df.agg({'A': 'sum', 'B': 'sum'}), df) self._run_test( lambda df: df.agg({ 'A': 'sum', 'B': 'mean' }), df, nonparallel=True) self._run_test( lambda df: df.agg({'A': ['sum', 'mean']}), df, nonparallel=True) self._run_test( lambda df: df.agg({ 'A': ['sum', 'mean'], 'B': 'min' }), df, nonparallel=True) def test_series_agg_level(self): self._run_test( lambda df: df.set_index(['group', 'foo']).bar.count(level=0), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).bar.max(level=0), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).bar.median(level=0), GROUPBY_DF) self._run_test( lambda df: df.set_index(['foo', 'group']).bar.count(level=1), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).bar.max(level=1), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).bar.max(level='foo'), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).bar.median(level=1), GROUPBY_DF) def test_dataframe_agg_level(self): self._run_test( lambda df: df.set_index(['group', 'foo']).count(level=0), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).max( level=0, numeric_only=False), GROUPBY_DF, check_proxy=False) # pandas implementation doesn't respect numeric_only argument here # 
(https://github.com/pandas-dev/pandas/issues/40788), it # always acts as if numeric_only=True. Our implmentation respects it so we # need to make it explicit. self._run_test( lambda df: df.set_index(['group', 'foo']).sum( level=0, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo'])[['bar']].count(level=1), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).count(level=1), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).max( level=1, numeric_only=False), GROUPBY_DF, check_proxy=False) # sum with str columns is order-sensitive self._run_test( lambda df: df.set_index(['group', 'foo']).sum( level=1, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.set_index(['group', 'foo']).median( level=0, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.drop('str', axis=1).set_index(['foo', 'group']).median( level=1, numeric_only=True), GROUPBY_DF) def test_series_agg_multifunc_level(self): # level= is ignored for multiple agg fns self._run_test( lambda df: df.set_index(['group', 'foo']).bar.agg(['min', 'max'], level=0), GROUPBY_DF) def test_dataframe_agg_multifunc_level(self): # level= is ignored for multiple agg fns self._run_test( lambda df: df.set_index(['group', 'foo']).agg(['min', 'max'], level=0), GROUPBY_DF, check_proxy=False) @parameterized.expand([(True, ), (False, )]) @unittest.skipIf( PD_VERSION < (1, 2), "pandas 1.1.0 produces different dtypes for these examples") def test_dataframe_agg_numeric_only(self, numeric_only): # Note other aggregation functions can fail on this input with # numeric_only={False,None}. These are the only ones that actually work for # the string inputs. self._run_test( lambda df: df.max(numeric_only=numeric_only), GROUPBY_DF, check_proxy=False) self._run_test( lambda df: df.min(numeric_only=numeric_only), GROUPBY_DF, check_proxy=False) @unittest.skip( "pandas implementation doesn't respect numeric_only= with " "level= (https://github.com/pandas-dev/pandas/issues/40788)") def test_dataframe_agg_level_numeric_only(self): self._run_test( lambda df: df.set_index('foo').sum(level=0, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.set_index('foo').max(level=0, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.set_index('foo').mean(level=0, numeric_only=True), GROUPBY_DF) self._run_test( lambda df: df.set_index('foo').median(level=0, numeric_only=True), GROUPBY_DF) def test_dataframe_agg_bool_only(self): df = pd.DataFrame({ 'all': [True for i in range(10)], 'any': [i % 3 == 0 for i in range(10)], 'int': range(10) }) self._run_test(lambda df: df.all(), df) self._run_test(lambda df: df.any(), df) self._run_test(lambda df: df.all(bool_only=True), df) self._run_test(lambda df: df.any(bool_only=True), df) @unittest.skip( "pandas doesn't implement bool_only= with level= " "(https://github.com/pandas-dev/pandas/blob/" "v1.2.3/pandas/core/generic.py#L10573)") def test_dataframe_agg_level_bool_only(self): df = pd.DataFrame({ 'all': [True for i in range(10)], 'any': [i % 3 == 0 for i in range(10)], 'int': range(10) }) self._run_test(lambda df: df.set_index('int', drop=False).all(level=0), df) self._run_test(lambda df: df.set_index('int', drop=False).any(level=0), df) self._run_test( lambda df: df.set_index('int', drop=False).all(level=0, bool_only=True), df) self._run_test( lambda df: df.set_index('int', drop=False).any(level=0, bool_only=True), df) def test_series_agg_np_size(self): self._run_test( lambda df: df.set_index(['group', 'foo']).agg(np.size), 
GROUPBY_DF, check_proxy=False) def test_df_agg_invalid_kwarg_raises(self): self._run_error_test(lambda df: df.agg('mean', bool_only=True), GROUPBY_DF) self._run_error_test( lambda df: df.agg('any', numeric_only=True), GROUPBY_DF) self._run_error_test( lambda df: df.agg('median', min_count=3, numeric_only=True), GROUPBY_DF) def test_series_agg_method_invalid_kwarg_raises(self): self._run_error_test(lambda df: df.foo.median(min_count=3), GROUPBY_DF) self._run_error_test( lambda df: df.foo.agg('median', min_count=3), GROUPBY_DF) @unittest.skipIf( PD_VERSION < (1, 3), ( "DataFrame.agg raises a different exception from the " "aggregation methods. Fixed in " "https://github.com/pandas-dev/pandas/pull/40543.")) def test_df_agg_method_invalid_kwarg_raises(self): self._run_error_test(lambda df: df.mean(bool_only=True), GROUPBY_DF) self._run_error_test(lambda df: df.any(numeric_only=True), GROUPBY_DF) self._run_error_test( lambda df: df.median(min_count=3, numeric_only=True), GROUPBY_DF) def test_agg_min_count(self): df = pd.DataFrame({ 'good': [1, 2, 3, np.nan], 'bad': [np.nan, np.nan, np.nan, 4], }, index=['a', 'b', 'a', 'b']) self._run_test(lambda df: df.sum(level=0, min_count=2), df) self._run_test(lambda df: df.sum(min_count=3), df, nonparallel=True) self._run_test(lambda df: df.sum(min_count=1), df, nonparallel=True) self._run_test(lambda df: df.good.sum(min_count=2), df, nonparallel=True) self._run_test(lambda df: df.bad.sum(min_count=2), df, nonparallel=True) def test_series_agg_std(self): s = pd.Series(range(10)) self._run_test(lambda s: s.agg('std'), s) self._run_test(lambda s: s.agg('var'), s) self._run_test(lambda s: s.agg(['std', 'sum']), s) self._run_test(lambda s: s.agg(['var']), s) def test_std_all_na(self): s = pd.Series([np.nan] * 10) self._run_test(lambda s: s.agg('std'), s) self._run_test(lambda s: s.std(), s) def test_std_mostly_na_with_ddof(self): df = pd.DataFrame({ 'one': [i if i % 8 == 0 else np.nan for i in range(8)], 'two': [i if i % 4 == 0 else np.nan for i in range(8)], 'three': [i if i % 2 == 0 else np.nan for i in range(8)], }, index=pd.MultiIndex.from_arrays( [list(range(8)), list(reversed(range(8)))], names=['forward', None])) self._run_test(lambda df: df.std(), df) # ddof=1 self._run_test(lambda df: df.std(ddof=0), df) self._run_test(lambda df: df.std(ddof=2), df) self._run_test(lambda df: df.std(ddof=3), df) self._run_test(lambda df: df.std(ddof=4), df) def test_dataframe_std(self): self._run_test(lambda df: df.std(numeric_only=True), GROUPBY_DF) self._run_test(lambda df: df.var(numeric_only=True), GROUPBY_DF) def test_dataframe_mode(self): self._run_test( lambda df: df.mode(), GROUPBY_DF, nonparallel=True, check_proxy=False) self._run_test( lambda df: df.mode(numeric_only=True), GROUPBY_DF, nonparallel=True, check_proxy=False) self._run_test( lambda df: df.mode(dropna=True, numeric_only=True), GROUPBY_DF, nonparallel=True, check_proxy=False) def test_series_mode(self): self._run_test(lambda df: df.foo.mode(), GROUPBY_DF, nonparallel=True) self._run_test( lambda df: df.baz.mode(dropna=True), GROUPBY_DF, nonparallel=True) class BeamSpecificTest(unittest.TestCase): """Tests for functionality that's specific to the Beam DataFrame API. 
These features don't exist in pandas so we must verify them independently.""" def assert_frame_data_equivalent(self, actual, expected): """Verify that actual is the same as expected, ignoring the index and order of the data.""" def sort_and_drop_index(df): if isinstance(df, pd.Series): df = df.sort_values() elif isinstance(df, pd.DataFrame): df = df.sort_values(by=list(df.columns)) return df.reset_index(drop=True) actual = sort_and_drop_index(actual) expected = sort_and_drop_index(expected) if isinstance(expected, pd.Series): pd.testing.assert_series_equal(actual, expected) elif isinstance(expected, pd.DataFrame): pd.testing.assert_frame_equal(actual, expected) def _evaluate(self, func, *args, distributed=True): deferred_args = [ frame_base.DeferredFrame.wrap( expressions.ConstantExpression(arg, arg[0:0])) for arg in args ] session_type = ( expressions.PartitioningSession if distributed else expressions.Session) return session_type({}).evaluate(func(*deferred_args)._expr) def test_drop_duplicates_keep_any(self): df = pd.DataFrame({ 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) result = self._evaluate(lambda df: df.drop_duplicates(keep='any'), df) # Verify that the result is the same as conventional drop_duplicates self.assert_frame_data_equivalent(result, df.drop_duplicates()) def test_drop_duplicates_keep_any_subset(self): df = pd.DataFrame({ 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) result = self._evaluate( lambda df: df.drop_duplicates(keep='any', subset=['brand']), df) self.assertTrue(result.brand.unique) self.assert_frame_data_equivalent( result.brand, df.drop_duplicates(subset=['brand']).brand) def test_series_drop_duplicates_keep_any(self): df = pd.DataFrame({ 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) result = self._evaluate(lambda df: df.brand.drop_duplicates(keep='any'), df) self.assert_frame_data_equivalent(result, df.brand.drop_duplicates()) def test_duplicated_keep_any(self): df = pd.DataFrame({ 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], 'rating': [4, 4, 3.5, 15, 5] }) result = self._evaluate(lambda df: df.duplicated(keep='any'), df) # Verify that the result is the same as conventional duplicated self.assert_frame_data_equivalent(result, df.duplicated()) def test_nsmallest_any(self): df = pd.DataFrame({ 'population': [ 59000000, 65000000, 434000, 434000, 434000, 337000, 337000, 11300, 11300 ], 'GDP': [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311], 'alpha-2': ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"] }, index=[ "Italy", "France", "Malta", "Maldives", "Brunei", "Iceland", "Nauru", "Tuvalu", "Anguilla" ]) result = self._evaluate( lambda df: df.population.nsmallest(3, keep='any'), df) # keep='any' should produce the same result as keep='first', # but not necessarily with the same index self.assert_frame_data_equivalent(result, df.population.nsmallest(3)) def test_nlargest_any(self): df = pd.DataFrame({ 'population': [ 59000000, 65000000, 434000, 434000, 434000, 337000, 337000, 11300, 11300 ], 'GDP': [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311], 'alpha-2': ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"] }, index=[ "Italy", "France", "Malta", "Maldives", "Brunei", 
"Iceland", "Nauru", "Tuvalu", "Anguilla" ]) result = self._evaluate( lambda df: df.population.nlargest(3, keep='any'), df) # keep='any' should produce the same result as keep='first', # but not necessarily with the same index self.assert_frame_data_equivalent(result, df.population.nlargest(3)) def test_sample(self): df = pd.DataFrame({ 'population': [ 59000000, 65000000, 434000, 434000, 434000, 337000, 337000, 11300, 11300 ], 'GDP': [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311], 'alpha-2': ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"] }, index=[ "Italy", "France", "Malta", "Maldives", "Brunei", "Iceland", "Nauru", "Tuvalu", "Anguilla" ]) result = self._evaluate(lambda df: df.sample(n=3), df) self.assertEqual(len(result), 3) series_result = self._evaluate(lambda df: df.GDP.sample(n=3), df) self.assertEqual(len(series_result), 3) self.assertEqual(series_result.name, "GDP") def test_sample_with_weights(self): df = pd.DataFrame({ 'population': [ 59000000, 65000000, 434000, 434000, 434000, 337000, 337000, 11300, 11300 ], 'GDP': [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311], 'alpha-2': ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"] }, index=[ "Italy", "France", "Malta", "Maldives", "Brunei", "Iceland", "Nauru", "Tuvalu", "Anguilla" ]) weights = pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 1], index=df.index) result = self._evaluate( lambda df, weights: df.sample(n=2, weights=weights), df, weights) self.assertEqual(len(result), 2) self.assertEqual(set(result.index), set(["Tuvalu", "Anguilla"])) series_result = self._evaluate( lambda df, weights: df.GDP.sample(n=2, weights=weights), df, weights) self.assertEqual(len(series_result), 2) self.assertEqual(series_result.name, "GDP") self.assertEqual(set(series_result.index), set(["Tuvalu", "Anguilla"])) def test_sample_with_missing_weights(self): df = pd.DataFrame({ 'population': [ 59000000, 65000000, 434000, 434000, 434000, 337000, 337000, 11300, 11300 ], 'GDP': [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311], 'alpha-2': ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"] }, index=[ "Italy", "France", "Malta", "Maldives", "Brunei", "Iceland", "Nauru", "Tuvalu", "Anguilla" ]) # Missing weights are treated as 0 weights = pd.Series([.1, .01, np.nan, 0], index=["Nauru", "Iceland", "Anguilla", "Italy"]) result = self._evaluate( lambda df, weights: df.sample(n=2, weights=weights), df, weights) self.assertEqual(len(result), 2) self.assertEqual(set(result.index), set(["Nauru", "Iceland"])) series_result = self._evaluate( lambda df, weights: df.GDP.sample(n=2, weights=weights), df, weights) self.assertEqual(len(series_result), 2) self.assertEqual(series_result.name, "GDP") self.assertEqual(set(series_result.index), set(["Nauru", "Iceland"])) def test_sample_with_weights_distribution(self): target_prob = 0.25 num_samples = 100 num_targets = 200 num_other_elements = 10000 target_weight = target_prob / num_targets other_weight = (1 - target_prob) / num_other_elements self.assertTrue(target_weight > other_weight * 10, "weights too close") result = self._evaluate( lambda s, weights: s.sample(n=num_samples, weights=weights).sum(), # The first elements are 1, the rest are all 0. This means that when # we sum all the sampled elements (above), the result should be the # number of times the first elements (aka targets) were sampled. 
pd.Series([1] * num_targets + [0] * num_other_elements), pd.Series([target_weight] * num_targets + [other_weight] * num_other_elements)) # With the above constants, the probability of violating this invariant # (as computed using the Bernoulli distribution) is about 0.0012%. expected = num_samples * target_prob self.assertTrue(expected / 3 < result < expected * 2, (expected, result)) class AllowNonParallelTest(unittest.TestCase): def _use_non_parallel_operation(self): _ = frame_base.DeferredFrame.wrap( expressions.PlaceholderExpression(pd.Series([1, 2, 3]))).replace( 'a', 'b', limit=1) def test_disallow_non_parallel(self): with self.assertRaises(expressions.NonParallelOperation): self._use_non_parallel_operation() def test_allow_non_parallel_in_context(self): with beam.dataframe.allow_non_parallel_operations(): self._use_non_parallel_operation() def test_allow_non_parallel_nesting(self): # disallowed with beam.dataframe.allow_non_parallel_operations(): # allowed self._use_non_parallel_operation() with beam.dataframe.allow_non_parallel_operations(False): # disallowed again with self.assertRaises(expressions.NonParallelOperation): self._use_non_parallel_operation() # allowed self._use_non_parallel_operation() # disallowed with self.assertRaises(expressions.NonParallelOperation): self._use_non_parallel_operation() class ConstructionTimeTest(unittest.TestCase): """Tests for operations that can be executed eagerly.""" DF = pd.DataFrame({ 'str_col': ['foo', 'bar'] * 3, 'int_col': [1, 2] * 3, 'flt_col': [1.1, 2.2] * 3, 'cat_col': pd.Series(list('aabbca'), dtype="category"), 'datetime_col': pd.Series( pd.date_range( '1/1/2000', periods=6, freq='m', tz='America/Los_Angeles')) }) DEFERRED_DF = frame_base.DeferredFrame.wrap( expressions.PlaceholderExpression(DF.iloc[:0])) def _run_test(self, fn): expected = fn(self.DF) actual = fn(self.DEFERRED_DF) if isinstance(expected, pd.Index): pd.testing.assert_index_equal(expected, actual) elif isinstance(expected, pd.Series): pd.testing.assert_series_equal(expected, actual) elif isinstance(expected, pd.DataFrame): pd.testing.assert_frame_equal(expected, actual) else: self.assertEqual(expected, actual) @parameterized.expand(DF.columns) def test_series_name(self, col_name): self._run_test(lambda df: df[col_name].name) @parameterized.expand(DF.columns) def test_series_dtype(self, col_name): self._run_test(lambda df: df[col_name].dtype) self._run_test(lambda df: df[col_name].dtypes) def test_dataframe_columns(self): self._run_test(lambda df: list(df.columns)) def test_dataframe_dtypes(self): self._run_test(lambda df: list(df.dtypes)) def test_categories(self): self._run_test(lambda df: df.cat_col.cat.categories) def test_categorical_ordered(self): self._run_test(lambda df: df.cat_col.cat.ordered) def test_groupby_ndim(self): self._run_test(lambda df: df.groupby('int_col').ndim) def test_groupby_project_ndim(self): self._run_test(lambda df: df.groupby('int_col').flt_col.ndim) self._run_test( lambda df: df.groupby('int_col')[['flt_col', 'str_col']].ndim) def test_get_column_default_None(self): # .get just returns default_value=None at construction time if the column # doesn't exist self._run_test(lambda df: df.get('FOO')) def test_datetime_tz(self): self._run_test(lambda df: df.datetime_col.dt.tz) class DocstringTest(unittest.TestCase): @parameterized.expand([ (frames.DeferredDataFrame, pd.DataFrame), (frames.DeferredSeries, pd.Series), #(frames._DeferredIndex, pd.Index), (frames._DeferredStringMethods, pd.core.strings.StringMethods), ( 
frames._DeferredCategoricalMethods, pd.core.arrays.categorical.CategoricalAccessor), (frames.DeferredGroupBy, pd.core.groupby.generic.DataFrameGroupBy), (frames._DeferredGroupByCols, pd.core.groupby.generic.DataFrameGroupBy), ( frames._DeferredDatetimeMethods, pd.core.indexes.accessors.DatetimeProperties), ]) def test_docs_defined(self, beam_type, pd_type): beam_attrs = set(dir(beam_type)) pd_attrs = set(dir(pd_type)) docstring_required = sorted([ attr for attr in beam_attrs.intersection(pd_attrs) if getattr(pd_type, attr).__doc__ and not attr.startswith('_') ]) docstring_missing = [ attr for attr in docstring_required if not getattr(beam_type, attr).__doc__ ] self.assertTrue( len(docstring_missing) == 0, f'{beam_type.__name__} is missing a docstring for ' f'{len(docstring_missing)}/{len(docstring_required)} ' f'({len(docstring_missing)/len(docstring_required):%}) ' f'operations:\n{docstring_missing}') class ReprTest(unittest.TestCase): def test_basic_dataframe(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(GROUPBY_DF)) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=['group', 'foo', 'bar', 'baz', 'bool', " "'str'], index=<unnamed>)")) def test_dataframe_with_named_index(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(GROUPBY_DF.set_index('group'))) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=['foo', 'bar', 'baz', 'bool', 'str'], " "index='group')")) def test_dataframe_with_partial_named_index(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression( GROUPBY_DF.set_index([GROUPBY_DF.index, 'group']))) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=['foo', 'bar', 'baz', 'bool', 'str'], " "indexes=[<unnamed>, 'group'])")) def test_dataframe_with_named_multi_index(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(GROUPBY_DF.set_index(['str', 'group']))) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=['foo', 'bar', 'baz', 'bool'], " "indexes=['str', 'group'])")) def test_dataframe_with_multiple_column_levels(self): df = pd.DataFrame({ 'foofoofoo': ['one', 'one', 'one', 'two', 'two', 'two'], 'barbar': ['A', 'B', 'C', 'A', 'B', 'C'], 'bazzy': [1, 2, 3, 4, 5, 6], 'zoop': ['x', 'y', 'z', 'q', 'w', 't'] }) df = df.pivot(index='foofoofoo', columns='barbar') df = frame_base.DeferredFrame.wrap(expressions.ConstantExpression(df)) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=[('bazzy', 'A'), ('bazzy', 'B'), " "('bazzy', 'C'), ('zoop', 'A'), ('zoop', 'B'), ('zoop', 'C')], " "index='foofoofoo')")) def test_dataframe_with_multiple_column_and_multiple_index_levels(self): df = pd.DataFrame({ 'foofoofoo': ['one', 'one', 'one', 'two', 'two', 'two'], 'barbar': ['A', 'B', 'C', 'A', 'B', 'C'], 'bazzy': [1, 2, 3, 4, 5, 6], 'zoop': ['x', 'y', 'z', 'q', 'w', 't'] }) df = df.pivot(index='foofoofoo', columns='barbar') df.index = [['a', 'b'], df.index] # pandas repr displays this: # bazzy zoop # barbar A B C A B C # foofoofoo # a one 1 2 3 x y z # b two 4 5 6 q w t df = frame_base.DeferredFrame.wrap(expressions.ConstantExpression(df)) self.assertEqual( repr(df), ( "DeferredDataFrame(columns=[('bazzy', 'A'), ('bazzy', 'B'), " "('bazzy', 'C'), ('zoop', 'A'), ('zoop', 'B'), ('zoop', 'C')], " "indexes=[<unnamed>, 'foofoofoo'])")) def test_basic_series(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression(GROUPBY_DF['bool'])) self.assertEqual( repr(df), "DeferredSeries(name='bool', dtype=bool, index=<unnamed>)") def test_series_with_named_index(self): df = 
frame_base.DeferredFrame.wrap( expressions.ConstantExpression(GROUPBY_DF.set_index('group')['str'])) self.assertEqual( repr(df), "DeferredSeries(name='str', dtype=object, index='group')") def test_series_with_partial_named_index(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression( GROUPBY_DF.set_index([GROUPBY_DF.index, 'group'])['bar'])) self.assertEqual( repr(df), ( "DeferredSeries(name='bar', dtype=float64, " "indexes=[<unnamed>, 'group'])")) def test_series_with_named_multi_index(self): df = frame_base.DeferredFrame.wrap( expressions.ConstantExpression( GROUPBY_DF.set_index(['str', 'group'])['baz'])) self.assertEqual( repr(df), "DeferredSeries(name='baz', dtype=float64, indexes=['str', 'group'])") if __name__ == '__main__': unittest.main()
apache-2.0
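The order-insensitive comparison used by assert_frame_data_equivalent above can be reproduced with plain pandas. The sketch below is a standalone illustration of that pattern; the helper name frames_equivalent is ours and is not part of the Beam DataFrame API.

import pandas as pd

def frames_equivalent(actual, expected):
    """Compare two DataFrames ignoring row order and index values."""
    def normalize(df):
        return df.sort_values(by=list(df.columns)).reset_index(drop=True)
    pd.testing.assert_frame_equal(normalize(actual), normalize(expected))

df = pd.DataFrame({
    'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
    'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
    'rating': [4, 4, 3.5, 15, 5],
})
# A shuffled copy of the deduplicated frame compares as equivalent once
# order and index are ignored, which is exactly what the tests rely on.
frames_equivalent(df.drop_duplicates().sample(frac=1, random_state=0),
                  df.drop_duplicates())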
mne-tools/mne-tools.github.io
0.15/_downloads/plot_visualize_epochs.py
1
5046
""" .. _tut_viz_epochs: Visualize Epochs data ===================== """ import os.path as op import mne data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample') raw = mne.io.read_raw_fif( op.join(data_path, 'sample_audvis_raw.fif'), preload=True) raw.load_data().filter(None, 9, fir_design='firwin') raw.set_eeg_reference('average', projection=True) # set EEG average reference event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, 'visual/right': 4, 'smiley': 5, 'button': 32} events = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif')) epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5) ############################################################################### # This tutorial focuses on visualization of epoched data. All of the functions # introduced here are basically high level matplotlib functions with built in # intelligence to work with epoched data. All the methods return a handle to # matplotlib figure instance. # # Events used for constructing the epochs here are the triggers for subject # being presented a smiley face at the center of the visual field. More of the # paradigm at :ref:`BABDHIFJ`. # # All plotting functions start with ``plot``. Let's start with the most # obvious. :func:`mne.Epochs.plot` offers an interactive browser that allows # rejection by hand when called in combination with a keyword ``block=True``. # This blocks the execution of the script until the browser window is closed. epochs.plot(block=True) ############################################################################### # The numbers at the top refer to the event id of the epoch. The number at the # bottom is the running numbering for the epochs. # # Since we did no artifact correction or rejection, there are epochs # contaminated with blinks and saccades. For instance, epoch number 1 seems to # be contaminated by a blink (scroll to the bottom to view the EOG channel). # This epoch can be marked for rejection by clicking on top of the browser # window. The epoch should turn red when you click it. This means that it will # be dropped as the browser window is closed. # # It is possible to plot event markers on epoched data by passing ``events`` # keyword to the epochs plotter. The events are plotted as vertical lines and # they follow the same coloring scheme as :func:`mne.viz.plot_events`. The # events plotter gives you all the events with a rough idea of the timing. # Since the colors are the same, the event plotter can also function as a # legend for the epochs plotter events. It is also possible to pass your own # colors via ``event_colors`` keyword. Here we can plot the reaction times # between seeing the smiley face and the button press (event 32). # # When events are passed, the epoch numbering at the bottom is switched off by # default to avoid overlaps. You can turn it back on via settings dialog by # pressing `o` key. You should check out `help` at the lower left corner of the # window for more information about the interactive features. events = mne.pick_events(events, include=[5, 32]) mne.viz.plot_events(events) epochs['smiley'].plot(events=events) ############################################################################### # To plot individual channels as an image, where you see all the epochs at one # glance, you can use function :func:`mne.Epochs.plot_image`. It shows the # amplitude of the signal over all the epochs plus an average (evoked response) # of the activation. 
We explicitly set the interactive colorbar on (it is also on # by default for plotting functions with a colorbar except the topo plots). In # interactive mode you can scale and change the colormap with mouse scroll and # up/down arrow keys. You can also drag the colorbar with left/right mouse # button. Hitting the space bar resets the scale. epochs.plot_image(278, cmap='interactive', sigma=1., vmin=-250, vmax=250) ############################################################################### # We can also give an overview of all channels by calculating the global # field power (or other aggregation methods). However, combining # multiple channel types (e.g., MEG and EEG) in this way is not sensible. # Instead, we can use the ``group_by`` parameter. Setting ``group_by`` to # 'type' combines channels by type. # ``group_by`` can also be used to group channels into arbitrary groups, e.g. # regions of interest, by providing a dictionary containing # group name -> channel indices mappings. epochs.plot_image(combine='gfp', group_by='type', sigma=2., cmap="YlGnBu_r") ############################################################################### # You also have functions for plotting channelwise information arranged into the # shape of the channel array. The image plotting uses automatic scaling by # default, but noisy channels and different channel types can cause the scaling # to be a bit off. Here we define the limits by hand. epochs.plot_topo_image(vmin=-250, vmax=250, title='ERF images', sigma=2.)
bsd-3-clause
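As a sketch of the dictionary form of ``group_by`` mentioned in the tutorial above: the split below into two arbitrary sets of gradiometers is purely illustrative, and it assumes the ``epochs`` object defined earlier in the script.

# Hypothetical ROI grouping for plot_image; assumes `epochs` from the tutorial.
import mne

grads = mne.pick_types(epochs.info, meg='grad')  # indices of gradiometer channels
rois = {'first ten grads': grads[:10], 'last ten grads': grads[-10:]}
epochs.plot_image(group_by=rois, combine='gfp', sigma=2., cmap='YlGnBu_r')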
jamaps/gdal_and_ogr_scripts
geojson_vs_shp.py
4
1604
# compares the sizes of geojsons to shps for the same spatial features import os import matplotlib.pyplot as plt import numpy as np import shutil import time from osgeo import ogr from subprocess import call # temp directory for converted files os.mkdir("test_output") # shell script for shp to geojson def to_geojson(shp): name = shp.split('.')[0] call(["ogr2ogr","-f", "GeoJSON", "test_output/%s.geojson" %name, shp]) # shell script for geojson to shp def to_shp(geojson): name = geojson.split('.')[0] call(["ogr2ogr","-f", "ESRI Shapefile", "test_output/%s.shp" %name, geojson]) geojson_list = [] shp_list = [] # loop through shps in directory, converting using functions and storing file sizes for f in os.listdir('.'): if os.path.isfile(f) and f.endswith(('.shp')): print f print f.split('.')[0] to_geojson(f) g = "%s" %f to_shp(g) shp_size = (os.stat('test_output/%s.shp' %f.split('.')[0])).st_size dbf_size = (os.stat('test_output/%s.dbf' %f.split('.')[0])).st_size shx_size = (os.stat('test_output/%s.shx' %f.split('.')[0])).st_size geojson_size = (os.stat('test_output/%s.geojson' %f.split('.')[0])).st_size shapefile_size = shp_size + dbf_size + shx_size geojson_list.append(float(geojson_size)/1000000) shp_list.append(float(shapefile_size)/1000000) shutil.rmtree("test_output") print "---------" # plot results with pyplot print geojson_list print shp_list plt.plot(geojson_list,shp_list,'ro') plt.plot([0,20],[0,20]) plt.axis([0, 20, 0, 20]) plt.show()
mit
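For reference, the shapefile size computed above is the sum of three sidecar files (.shp, .dbf, .shx). A small helper expressing the same bookkeeping; the function name and the example layer name are ours, not part of the original script.

import os

def shapefile_size_mb(basename):
    """Total size of a shapefile's .shp, .dbf and .shx sidecars, in MB."""
    return sum(os.stat(basename + ext).st_size
               for ext in ('.shp', '.dbf', '.shx')) / 1e6

# e.g. shapefile_size_mb('test_output/parcels') for a hypothetical layer 'parcels'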
KaiSzuttor/espresso
testsuite/scripts/tutorials/test_08-visualization.py
3
1294
# Copyright (C) 2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest as ut import importlib_wrapper def disable_visualizer_GUI(code): breakpoint = "t = Thread(target=main)" assert breakpoint in code code = code.split(breakpoint, 1)[0] + "main()" return code tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import( "@TUTORIALS_DIR@/08-visualization/08-visualization.py", substitutions=disable_visualizer_GUI, int_n_times=5, int_steps=100, matplotlib_notebook=False) @skipIfMissingFeatures class Tutorial(ut.TestCase): system = tutorial.system if __name__ == "__main__": ut.main()
gpl-3.0
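The ``substitutions`` hook used above is a source-to-source string transform applied before the tutorial is imported. The standalone toy example below mirrors the cut-at-breakpoint pattern of ``disable_visualizer_GUI`` without touching ESPResSo itself.

code = "setup()\nt = Thread(target=main)\nt.start()\nt.join()\n"
marker = "t = Thread(target=main)"
assert marker in code
patched = code.split(marker, 1)[0] + "main()"
# Everything from the Thread launch onward is dropped and replaced by a
# direct, blocking call to main(), so the GUI thread is never started.
print(patched)  # prints: setup()  then  main()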
jedman/dedalus-leakylid
dedalus_plots.py
1
1400
import numpy as np import matplotlib.pyplot as plt import h5py plt.rcParams['image.cmap'] = 'RdBu_r' plt.rcParams.update({'font.size': 16}) def read_vars(data, vars ): '''read in var in vars from data (h5pyfile) and return dict selected_vars''' selected_vars = {} for key, varname in vars.items(): # read in data tmp = data['tasks'][varname][:] # make a new dictionary with data selected_vars[key] = tmp return selected_vars def read_dims(data): '''read in x, z, t dims from data (h5pyfile) and return dict all_dims''' all_dims = {} all_dims['x'] = data['scales/x/1.0'][:] all_dims['z'] = data['scales/z/1.0'][:] all_dims['t'] = data['scales']['sim_time'][:] return all_dims def make_1D_plot(filename,dim, **kwargs): ''' make a line plot''' plt.clf() styles = ['-','--','-.'] ctr = 0 for name, var in kwargs.items(): plt.plot(dim, var, label = name, lw = 2, ls = styles[ctr], marker = 'x') ctr += 1 plt.legend() plt.savefig(filename) plt.clf() return def make_2D_plot(filename,dims,var, title = '', xlabel = '', ylabel = ''): ''' make a pcolormesh plot ''' plt.pcolormesh(dims[0] , dims[1], var) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.colorbar() plt.savefig(filename) plt.clf() return
gpl-2.0
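A usage sketch for the helpers above, with the caveats stated up front: the analysis file name, the task names, and the (time, x, z) array layout are assumptions about how the Dedalus solver was configured, not part of this script.

import h5py

# Hypothetical Dedalus analysis file and task names.
with h5py.File('snapshots/snapshots_s1.h5', 'r') as data:
    dims = read_dims(data)
    fields = read_vars(data, {'w': 'w', 'b': 'buoyancy'})

# Assumes tasks are stored as (time, x, z); transpose so pcolormesh gets (z, x).
make_2D_plot('w_final.png', (dims['x'], dims['z']), fields['w'][-1].T,
             title='vertical velocity at final time', xlabel='x', ylabel='z')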
pprett/scikit-learn
sklearn/mixture/tests/test_gaussian_mixture.py
19
40215
# Author: Wei Xue <[email protected]> # Thierry Guillemot <[email protected]> # License: BSD 3 clauseimport warnings import sys import warnings import numpy as np from scipy import stats, linalg from sklearn.covariance import EmpiricalCovariance from sklearn.datasets.samples_generator import make_spd_matrix from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.metrics.cluster import adjusted_rand_score from sklearn.mixture.gaussian_mixture import GaussianMixture from sklearn.mixture.gaussian_mixture import ( _estimate_gaussian_covariances_full, _estimate_gaussian_covariances_tied, _estimate_gaussian_covariances_diag, _estimate_gaussian_covariances_spherical) from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky from sklearn.mixture.gaussian_mixture import _compute_log_det_cholesky from sklearn.exceptions import ConvergenceWarning, NotFittedError from sklearn.utils.extmath import fast_logdet from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical'] def generate_data(n_samples, n_features, weights, means, precisions, covariance_type): rng = np.random.RandomState(0) X = [] if covariance_type == 'spherical': for _, (w, m, c) in enumerate(zip(weights, means, precisions['spherical'])): X.append(rng.multivariate_normal(m, c * np.eye(n_features), int(np.round(w * n_samples)))) if covariance_type == 'diag': for _, (w, m, c) in enumerate(zip(weights, means, precisions['diag'])): X.append(rng.multivariate_normal(m, np.diag(c), int(np.round(w * n_samples)))) if covariance_type == 'tied': for _, (w, m) in enumerate(zip(weights, means)): X.append(rng.multivariate_normal(m, precisions['tied'], int(np.round(w * n_samples)))) if covariance_type == 'full': for _, (w, m, c) in enumerate(zip(weights, means, precisions['full'])): X.append(rng.multivariate_normal(m, c, int(np.round(w * n_samples)))) X = np.vstack(X) return X class RandomData(object): def __init__(self, rng, n_samples=500, n_components=2, n_features=2, scale=50): self.n_samples = n_samples self.n_components = n_components self.n_features = n_features self.weights = rng.rand(n_components) self.weights = self.weights / self.weights.sum() self.means = rng.rand(n_components, n_features) * scale self.covariances = { 'spherical': .5 + rng.rand(n_components), 'diag': (.5 + rng.rand(n_components, n_features)) ** 2, 'tied': make_spd_matrix(n_features, random_state=rng), 'full': np.array([ make_spd_matrix(n_features, random_state=rng) * .5 for _ in range(n_components)])} self.precisions = { 'spherical': 1. / self.covariances['spherical'], 'diag': 1. 
/ self.covariances['diag'], 'tied': linalg.inv(self.covariances['tied']), 'full': np.array([linalg.inv(covariance) for covariance in self.covariances['full']])} self.X = dict(zip(COVARIANCE_TYPE, [generate_data( n_samples, n_features, self.weights, self.means, self.covariances, covar_type) for covar_type in COVARIANCE_TYPE])) self.Y = np.hstack([k * np.ones(int(np.round(w * n_samples))) for k, w in enumerate(self.weights)]) def test_gaussian_mixture_attributes(): # test bad parameters rng = np.random.RandomState(0) X = rng.rand(10, 2) n_components_bad = 0 gmm = GaussianMixture(n_components=n_components_bad) assert_raise_message(ValueError, "Invalid value for 'n_components': %d " "Estimation requires at least one component" % n_components_bad, gmm.fit, X) # covariance_type should be in [spherical, diag, tied, full] covariance_type_bad = 'bad_covariance_type' gmm = GaussianMixture(covariance_type=covariance_type_bad) assert_raise_message(ValueError, "Invalid value for 'covariance_type': %s " "'covariance_type' should be in " "['spherical', 'tied', 'diag', 'full']" % covariance_type_bad, gmm.fit, X) tol_bad = -1 gmm = GaussianMixture(tol=tol_bad) assert_raise_message(ValueError, "Invalid value for 'tol': %.5f " "Tolerance used by the EM must be non-negative" % tol_bad, gmm.fit, X) reg_covar_bad = -1 gmm = GaussianMixture(reg_covar=reg_covar_bad) assert_raise_message(ValueError, "Invalid value for 'reg_covar': %.5f " "regularization on covariance must be " "non-negative" % reg_covar_bad, gmm.fit, X) max_iter_bad = 0 gmm = GaussianMixture(max_iter=max_iter_bad) assert_raise_message(ValueError, "Invalid value for 'max_iter': %d " "Estimation requires at least one iteration" % max_iter_bad, gmm.fit, X) n_init_bad = 0 gmm = GaussianMixture(n_init=n_init_bad) assert_raise_message(ValueError, "Invalid value for 'n_init': %d " "Estimation requires at least one run" % n_init_bad, gmm.fit, X) init_params_bad = 'bad_method' gmm = GaussianMixture(init_params=init_params_bad) assert_raise_message(ValueError, "Unimplemented initialization method '%s'" % init_params_bad, gmm.fit, X) # test good parameters n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1 covariance_type, init_params = 'full', 'random' gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init, max_iter=max_iter, reg_covar=reg_covar, covariance_type=covariance_type, init_params=init_params).fit(X) assert_equal(gmm.n_components, n_components) assert_equal(gmm.covariance_type, covariance_type) assert_equal(gmm.tol, tol) assert_equal(gmm.reg_covar, reg_covar) assert_equal(gmm.max_iter, max_iter) assert_equal(gmm.n_init, n_init) assert_equal(gmm.init_params, init_params) def test_check_X(): from sklearn.mixture.base import _check_X rng = np.random.RandomState(0) n_samples, n_components, n_features = 10, 2, 2 X_bad_dim = rng.rand(n_components - 1, n_features) assert_raise_message(ValueError, 'Expected n_samples >= n_components ' 'but got n_components = %d, n_samples = %d' % (n_components, X_bad_dim.shape[0]), _check_X, X_bad_dim, n_components) X_bad_dim = rng.rand(n_components, n_features + 1) assert_raise_message(ValueError, 'Expected the input data X have %d features, ' 'but got %d features' % (n_features, X_bad_dim.shape[1]), _check_X, X_bad_dim, n_components, n_features) X = rng.rand(n_samples, n_features) assert_array_equal(X, _check_X(X, n_components, n_features)) def test_check_weights(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components X = rand_data.X['full'] 
g = GaussianMixture(n_components=n_components) # Check bad shape weights_bad_shape = rng.rand(n_components, 1) g.weights_init = weights_bad_shape assert_raise_message(ValueError, "The parameter 'weights' should have the shape of " "(%d,), but got %s" % (n_components, str(weights_bad_shape.shape)), g.fit, X) # Check bad range weights_bad_range = rng.rand(n_components) + 1 g.weights_init = weights_bad_range assert_raise_message(ValueError, "The parameter 'weights' should be in the range " "[0, 1], but got max value %.5f, min value %.5f" % (np.min(weights_bad_range), np.max(weights_bad_range)), g.fit, X) # Check bad normalization weights_bad_norm = rng.rand(n_components) weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1) g.weights_init = weights_bad_norm assert_raise_message(ValueError, "The parameter 'weights' should be normalized, " "but got sum(weights) = %.5f" % np.sum(weights_bad_norm), g.fit, X) # Check good weights matrix weights = rand_data.weights g = GaussianMixture(weights_init=weights, n_components=n_components) g.fit(X) assert_array_equal(weights, g.weights_init) def test_check_means(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components, n_features = rand_data.n_components, rand_data.n_features X = rand_data.X['full'] g = GaussianMixture(n_components=n_components) # Check means bad shape means_bad_shape = rng.rand(n_components + 1, n_features) g.means_init = means_bad_shape assert_raise_message(ValueError, "The parameter 'means' should have the shape of ", g.fit, X) # Check good means matrix means = rand_data.means g.means_init = means g.fit(X) assert_array_equal(means, g.means_init) def test_check_precisions(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components, n_features = rand_data.n_components, rand_data.n_features # Define the bad precisions for each covariance_type precisions_bad_shape = { 'full': np.ones((n_components + 1, n_features, n_features)), 'tied': np.ones((n_features + 1, n_features + 1)), 'diag': np.ones((n_components + 1, n_features)), 'spherical': np.ones((n_components + 1))} # Define not positive-definite precisions precisions_not_pos = np.ones((n_components, n_features, n_features)) precisions_not_pos[0] = np.eye(n_features) precisions_not_pos[0, 0, 0] = -1. precisions_not_positive = { 'full': precisions_not_pos, 'tied': precisions_not_pos[0], 'diag': -1. * np.ones((n_components, n_features)), 'spherical': -1. 
* np.ones(n_components)} not_positive_errors = { 'full': 'symmetric, positive-definite', 'tied': 'symmetric, positive-definite', 'diag': 'positive', 'spherical': 'positive'} for covar_type in COVARIANCE_TYPE: X = RandomData(rng).X[covar_type] g = GaussianMixture(n_components=n_components, covariance_type=covar_type, random_state=rng) # Check precisions with bad shapes g.precisions_init = precisions_bad_shape[covar_type] assert_raise_message(ValueError, "The parameter '%s precision' should have " "the shape of" % covar_type, g.fit, X) # Check not positive precisions g.precisions_init = precisions_not_positive[covar_type] assert_raise_message(ValueError, "'%s precision' should be %s" % (covar_type, not_positive_errors[covar_type]), g.fit, X) # Check the correct init of precisions_init g.precisions_init = rand_data.precisions[covar_type] g.fit(X) assert_array_equal(rand_data.precisions[covar_type], g.precisions_init) def test_suffstat_sk_full(): # compare the precision matrix compute from the # EmpiricalCovariance.covariance fitted on X*sqrt(resp) # with _sufficient_sk_full, n_components=1 rng = np.random.RandomState(0) n_samples, n_features = 500, 2 # special case 1, assuming data is "centered" X = rng.rand(n_samples, n_features) resp = rng.rand(n_samples, 1) X_resp = np.sqrt(resp) * X nk = np.array([n_samples]) xk = np.zeros((1, n_features)) covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) ecov = EmpiricalCovariance(assume_centered=True) ecov.fit(X_resp) assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0) assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full') precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) assert_array_almost_equal(precs_est, precs_pred) # special case 2, assuming resp are all ones resp = np.ones((n_samples, 1)) nk = np.array([n_samples]) xk = X.mean(axis=0).reshape((1, -1)) covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) ecov = EmpiricalCovariance(assume_centered=False) ecov.fit(X) assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0) assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full') precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) assert_array_almost_equal(precs_est, precs_pred) def test_suffstat_sk_tied(): # use equation Nk * Sk / N = S_tied rng = np.random.RandomState(0) n_samples, n_features, n_components = 500, 2, 2 resp = rng.rand(n_samples, n_components) resp = resp / resp.sum(axis=1)[:, np.newaxis] X = rng.rand(n_samples, n_features) nk = resp.sum(axis=0) xk = np.dot(resp.T, X) / nk[:, np.newaxis] covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0) ecov = EmpiricalCovariance() ecov.covariance_ = covars_pred_full assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0) assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied') precs_pred = 
np.dot(precs_chol_pred, precs_chol_pred.T) precs_est = linalg.inv(covars_pred_tied) assert_array_almost_equal(precs_est, precs_pred) def test_suffstat_sk_diag(): # test against 'full' case rng = np.random.RandomState(0) n_samples, n_features, n_components = 500, 2, 2 resp = rng.rand(n_samples, n_components) resp = resp / resp.sum(axis=1)[:, np.newaxis] X = rng.rand(n_samples, n_features) nk = resp.sum(axis=0) xk = np.dot(resp.T, X) / nk[:, np.newaxis] covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0) ecov = EmpiricalCovariance() for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag): ecov.covariance_ = np.diag(np.diag(cov_full)) cov_diag = np.diag(cov_diag) assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0) assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag') assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2) def test_gaussian_suffstat_sk_spherical(): # computing spherical covariance equals to the variance of one-dimension # data after flattening, n_components=1 rng = np.random.RandomState(0) n_samples, n_features = 500, 2 X = rng.rand(n_samples, n_features) X = X - X.mean() resp = np.ones((n_samples, 1)) nk = np.array([n_samples]) xk = X.mean() covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) / (n_features * n_samples)) assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) # check the precision computation precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, 'spherical') assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2) def test_compute_log_det_cholesky(): n_features = 2 rand_data = RandomData(np.random.RandomState(0)) for covar_type in COVARIANCE_TYPE: covariance = rand_data.covariances[covar_type] if covar_type == 'full': predected_det = np.array([linalg.det(cov) for cov in covariance]) elif covar_type == 'tied': predected_det = linalg.det(covariance) elif covar_type == 'diag': predected_det = np.array([np.prod(cov) for cov in covariance]) elif covar_type == 'spherical': predected_det = covariance ** n_features # We compute the cholesky decomposition of the covariance matrix expected_det = _compute_log_det_cholesky(_compute_precision_cholesky( covariance, covar_type), covar_type, n_features=n_features) assert_array_almost_equal(expected_det, - .5 * np.log(predected_det)) def _naive_lmvnpdf_diag(X, means, covars): resp = np.empty((len(X), len(means))) stds = np.sqrt(covars) for i, (mean, std) in enumerate(zip(means, stds)): resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1) return resp def test_gaussian_mixture_log_probabilities(): from sklearn.mixture.gaussian_mixture import _estimate_log_gaussian_prob # test aginst with _naive_lmvnpdf_diag rng = np.random.RandomState(0) rand_data = RandomData(rng) n_samples = 500 n_features = rand_data.n_features n_components = rand_data.n_components means = rand_data.means covars_diag = rng.rand(n_components, n_features) X = rng.rand(n_samples, n_features) log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag) # full covariances precs_full = np.array([np.diag(1. 
/ np.sqrt(x)) for x in covars_diag]) log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full') assert_array_almost_equal(log_prob, log_prob_naive) # diag covariances precs_chol_diag = 1. / np.sqrt(covars_diag) log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag') assert_array_almost_equal(log_prob, log_prob_naive) # tied covars_tied = np.array([x for x in covars_diag]).mean(axis=0) precs_tied = np.diag(np.sqrt(1. / covars_tied)) log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components) log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied') assert_array_almost_equal(log_prob, log_prob_naive) # spherical covars_spherical = covars_diag.mean(axis=1) precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1)) log_prob_naive = _naive_lmvnpdf_diag(X, means, [[k] * n_features for k in covars_spherical]) log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, 'spherical') assert_array_almost_equal(log_prob, log_prob_naive) # skip tests on weighted_log_probabilities, log_weights def test_gaussian_mixture_estimate_log_prob_resp(): # test whether responsibilities are normalized rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=5) n_samples = rand_data.n_samples n_features = rand_data.n_features n_components = rand_data.n_components X = rng.rand(n_samples, n_features) for covar_type in COVARIANCE_TYPE: weights = rand_data.weights means = rand_data.means precisions = rand_data.precisions[covar_type] g = GaussianMixture(n_components=n_components, random_state=rng, weights_init=weights, means_init=means, precisions_init=precisions, covariance_type=covar_type) g.fit(X) resp = g.predict_proba(X) assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples)) assert_array_equal(g.weights_init, weights) assert_array_equal(g.means_init, means) assert_array_equal(g.precisions_init, precisions) def test_gaussian_mixture_predict_predict_proba(): rng = np.random.RandomState(0) rand_data = RandomData(rng) for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] Y = rand_data.Y g = GaussianMixture(n_components=rand_data.n_components, random_state=rng, weights_init=rand_data.weights, means_init=rand_data.means, precisions_init=rand_data.precisions[covar_type], covariance_type=covar_type) # Check a warning message arrive if we don't do fit assert_raise_message(NotFittedError, "This GaussianMixture instance is not fitted " "yet. 
Call 'fit' with appropriate arguments " "before using this method.", g.predict, X) g.fit(X) Y_pred = g.predict(X) Y_pred_proba = g.predict_proba(X).argmax(axis=1) assert_array_equal(Y_pred, Y_pred_proba) assert_greater(adjusted_rand_score(Y, Y_pred), .95) def test_gaussian_mixture_fit(): # recover the ground truth rng = np.random.RandomState(0) rand_data = RandomData(rng) n_features = rand_data.n_features n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture(n_components=n_components, n_init=20, reg_covar=0, random_state=rng, covariance_type=covar_type) g.fit(X) # needs more data to pass the test with rtol=1e-7 assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2) arg_idx1 = g.means_[:, 0].argsort() arg_idx2 = rand_data.means[:, 0].argsort() assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2) if covar_type == 'full': prec_pred = g.precisions_ prec_test = rand_data.precisions['full'] elif covar_type == 'tied': prec_pred = np.array([g.precisions_] * n_components) prec_test = np.array([rand_data.precisions['tied']] * n_components) elif covar_type == 'spherical': prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_]) prec_test = np.array([np.eye(n_features) * c for c in rand_data.precisions['spherical']]) elif covar_type == 'diag': prec_pred = np.array([np.diag(d) for d in g.precisions_]) prec_test = np.array([np.diag(d) for d in rand_data.precisions['diag']]) arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort() arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort() for k, h in zip(arg_idx1, arg_idx2): ecov = EmpiricalCovariance() ecov.covariance_ = prec_test[h] # the accuracy depends on the number of data and randomness, rng assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1) def test_gaussian_mixture_fit_best_params(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components n_init = 10 for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type) ll = [] for _ in range(n_init): g.fit(X) ll.append(g.score(X)) ll = np.array(ll) g_best = GaussianMixture(n_components=n_components, n_init=n_init, reg_covar=0, random_state=rng, covariance_type=covar_type) g_best.fit(X) assert_almost_equal(ll.min(), g_best.score(X)) def test_gaussian_mixture_fit_convergence_warning(): rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=1) n_components = rand_data.n_components max_iter = 1 for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture(n_components=n_components, n_init=1, max_iter=max_iter, reg_covar=0, random_state=rng, covariance_type=covar_type) assert_warns_message(ConvergenceWarning, 'Initialization %d did not converge. ' 'Try different init parameters, ' 'or increase max_iter, tol ' 'or check for degenerate data.' 
% max_iter, g.fit, X) def test_multiple_init(): # Test that multiple inits does not much worse than a single one rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 5, 2 X = rng.randn(n_samples, n_features) for cv_type in COVARIANCE_TYPE: train1 = GaussianMixture(n_components=n_components, covariance_type=cv_type, random_state=rng).fit(X).score(X) train2 = GaussianMixture(n_components=n_components, covariance_type=cv_type, random_state=rng, n_init=5).fit(X).score(X) assert_greater_equal(train2, train1) def test_gaussian_mixture_n_parameters(): # Test that the right number of parameters is estimated rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 5, 2 X = rng.randn(n_samples, n_features) n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41} for cv_type in COVARIANCE_TYPE: g = GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=rng).fit(X) assert_equal(g._n_parameters(), n_params[cv_type]) def test_bic_1d_1component(): # Test all of the covariance_types return the same BIC score for # 1-dimensional, 1 component fits. rng = np.random.RandomState(0) n_samples, n_dim, n_components = 100, 1, 1 X = rng.randn(n_samples, n_dim) bic_full = GaussianMixture(n_components=n_components, covariance_type='full', random_state=rng).fit(X).bic(X) for covariance_type in ['tied', 'diag', 'spherical']: bic = GaussianMixture(n_components=n_components, covariance_type=covariance_type, random_state=rng).fit(X).bic(X) assert_almost_equal(bic_full, bic) def test_gaussian_mixture_aic_bic(): # Test the aic and bic criteria rng = np.random.RandomState(0) n_samples, n_features, n_components = 50, 3, 2 X = rng.randn(n_samples, n_features) # standard gaussian entropy sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi))) for cv_type in COVARIANCE_TYPE: g = GaussianMixture( n_components=n_components, covariance_type=cv_type, random_state=rng, max_iter=200) g.fit(X) aic = 2 * n_samples * sgh + 2 * g._n_parameters() bic = (2 * n_samples * sgh + np.log(n_samples) * g._n_parameters()) bound = n_features / np.sqrt(n_samples) assert_true((g.aic(X) - aic) / n_samples < bound) assert_true((g.bic(X) - bic) / n_samples < bound) def test_gaussian_mixture_verbose(): rng = np.random.RandomState(0) rand_data = RandomData(rng) n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type, verbose=1) h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type, verbose=2) old_stdout = sys.stdout sys.stdout = StringIO() try: g.fit(X) h.fit(X) finally: sys.stdout = old_stdout def test_warm_start(): random_state = 0 rng = np.random.RandomState(random_state) n_samples, n_features, n_components = 500, 2, 2 X = rng.rand(n_samples, n_features) # Assert the warm_start give the same result for the same number of iter g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2, reg_covar=0, random_state=random_state, warm_start=False) h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1, reg_covar=0, random_state=random_state, warm_start=True) with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) g.fit(X) score1 = h.fit(X).score(X) score2 = h.fit(X).score(X) assert_almost_equal(g.weights_, h.weights_) assert_almost_equal(g.means_, h.means_) assert_almost_equal(g.precisions_, 
h.precisions_) assert_greater(score2, score1) # Assert that by using warm_start we can converge to a good solution g = GaussianMixture(n_components=n_components, n_init=1, max_iter=5, reg_covar=0, random_state=random_state, warm_start=False, tol=1e-6) h = GaussianMixture(n_components=n_components, n_init=1, max_iter=5, reg_covar=0, random_state=random_state, warm_start=True, tol=1e-6) with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) g.fit(X) h.fit(X).fit(X) assert_true(not g.converged_) assert_true(h.converged_) def test_score(): covar_type = 'full' rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) n_components = rand_data.n_components X = rand_data.X[covar_type] # Check the error message if we don't call fit gmm1 = GaussianMixture(n_components=n_components, n_init=1, max_iter=1, reg_covar=0, random_state=rng, covariance_type=covar_type) assert_raise_message(NotFittedError, "This GaussianMixture instance is not fitted " "yet. Call 'fit' with appropriate arguments " "before using this method.", gmm1.score, X) # Check score value with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) gmm1.fit(X) gmm_score = gmm1.score(X) gmm_score_proba = gmm1.score_samples(X).mean() assert_almost_equal(gmm_score, gmm_score_proba) # Check if the score increase gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type).fit(X) assert_greater(gmm2.score(X), gmm1.score(X)) def test_score_samples(): covar_type = 'full' rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) n_components = rand_data.n_components X = rand_data.X[covar_type] # Check the error message if we don't call fit gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0, random_state=rng, covariance_type=covar_type) assert_raise_message(NotFittedError, "This GaussianMixture instance is not fitted " "yet. Call 'fit' with appropriate arguments " "before using this method.", gmm.score_samples, X) gmm_score_samples = gmm.fit(X).score_samples(X) assert_equal(gmm_score_samples.shape[0], rand_data.n_samples) def test_monotonic_likelihood(): # We check that each step of the EM without regularization improve # monotonically the training set likelihood rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] gmm = GaussianMixture(n_components=n_components, covariance_type=covar_type, reg_covar=0, warm_start=True, max_iter=1, random_state=rng, tol=1e-7) current_log_likelihood = -np.infty with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) # Do one training iteration at a time so we can make sure that the # training log likelihood increases after each iteration. for _ in range(600): prev_log_likelihood = current_log_likelihood try: current_log_likelihood = gmm.fit(X).score(X) except ConvergenceWarning: pass assert_greater_equal(current_log_likelihood, prev_log_likelihood) if gmm.converged_: break assert_true(gmm.converged_) def test_regularisation(): # We train the GaussianMixture on degenerate data by defining two clusters # of a 0 covariance. 
rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = np.vstack((np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features)))) for covar_type in COVARIANCE_TYPE: gmm = GaussianMixture(n_components=n_samples, reg_covar=0, covariance_type=covar_type, random_state=rng) with warnings.catch_warnings(): warnings.simplefilter("ignore", RuntimeWarning) assert_raise_message(ValueError, "Fitting the mixture model failed because " "some components have ill-defined empirical " "covariance (for instance caused by " "singleton or collapsed samples). Try to " "decrease the number of components, or " "increase reg_covar.", gmm.fit, X) gmm.set_params(reg_covar=1e-6).fit(X) def test_property(): rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7) n_components = rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] gmm = GaussianMixture(n_components=n_components, covariance_type=covar_type, random_state=rng, n_init=5) gmm.fit(X) if covar_type == 'full': for prec, covar in zip(gmm.precisions_, gmm.covariances_): assert_array_almost_equal(linalg.inv(prec), covar) elif covar_type == 'tied': assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_) else: assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_) def test_sample(): rng = np.random.RandomState(0) rand_data = RandomData(rng, scale=7, n_components=3) n_features, n_components = rand_data.n_features, rand_data.n_components for covar_type in COVARIANCE_TYPE: X = rand_data.X[covar_type] gmm = GaussianMixture(n_components=n_components, covariance_type=covar_type, random_state=rng) # To sample we need that GaussianMixture is fitted assert_raise_message(NotFittedError, "This GaussianMixture instance " "is not fitted", gmm.sample, 0) gmm.fit(X) assert_raise_message(ValueError, "Invalid value for 'n_samples", gmm.sample, 0) # Just to make sure the class samples correctly n_samples = 20000 X_s, y_s = gmm.sample(n_samples) for k in range(n_components): if covar_type == 'full': assert_array_almost_equal(gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1) elif covar_type == 'tied': assert_array_almost_equal(gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1) elif covar_type == 'diag': assert_array_almost_equal(gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1) else: assert_array_almost_equal( gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]), decimal=1) means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)]) assert_array_almost_equal(gmm.means_, means_s, decimal=1) # Check shapes of sampled data, see # https://github.com/scikit-learn/scikit-learn/issues/7701 assert_equal(X_s.shape, (n_samples, n_features)) for sample_size in range(1, 100): X_s, _ = gmm.sample(sample_size) assert_equal(X_s.shape, (sample_size, n_features)) @ignore_warnings(category=ConvergenceWarning) def test_init(): # We check that by increasing the n_init number we have a better solution random_state = 0 rand_data = RandomData(np.random.RandomState(random_state), scale=1) n_components = rand_data.n_components X = rand_data.X['full'] gmm1 = GaussianMixture(n_components=n_components, n_init=1, max_iter=1, random_state=random_state).fit(X) gmm2 = GaussianMixture(n_components=n_components, n_init=100, max_iter=1, random_state=random_state).fit(X) assert_greater(gmm2.lower_bound_, gmm1.lower_bound_)
bsd-3-clause
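As a small self-contained illustration of the precision/covariance relationship that ``test_property`` exercises above (public API only; the toy data below is made up):

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
# Two well-separated blobs so the fit is stable.
X = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + 5.0])

gmm = GaussianMixture(n_components=2, covariance_type='full',
                      random_state=0).fit(X)
# For covariance_type='full', each precision matrix is the inverse of the
# corresponding covariance matrix.
for prec, cov in zip(gmm.precisions_, gmm.covariances_):
    np.testing.assert_allclose(np.linalg.inv(prec), cov, rtol=1e-5, atol=1e-8)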
shyamalschandra/scikit-learn
sklearn/neighbors/tests/test_ball_tree.py
159
10196
import pickle import numpy as np from numpy.testing import assert_array_almost_equal from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap, simultaneous_sort, kernel_norm, nodeheap_sort, DTYPE, ITYPE) from sklearn.neighbors.dist_metrics import DistanceMetric from sklearn.utils.testing import SkipTest, assert_allclose rng = np.random.RandomState(10) V = rng.rand(3, 3) V = np.dot(V, V.T) DIMENSION = 3 METRICS = {'euclidean': {}, 'manhattan': {}, 'minkowski': dict(p=3), 'chebyshev': {}, 'seuclidean': dict(V=np.random.random(DIMENSION)), 'wminkowski': dict(p=3, w=np.random.random(DIMENSION)), 'mahalanobis': dict(V=V)} DISCRETE_METRICS = ['hamming', 'canberra', 'braycurtis'] BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def brute_force_neighbors(X, Y, k, metric, **kwargs): D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X) ind = np.argsort(D, axis=1)[:, :k] dist = D[np.arange(Y.shape[0])[:, None], ind] return dist, ind def test_ball_tree_query(): np.random.seed(0) X = np.random.random((40, DIMENSION)) Y = np.random.random((10, DIMENSION)) def check_neighbors(dualtree, breadth_first, k, metric, kwargs): bt = BallTree(X, leaf_size=1, metric=metric, **kwargs) dist1, ind1 = bt.query(Y, k, dualtree=dualtree, breadth_first=breadth_first) dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs) # don't check indices here: if there are any duplicate distances, # the indices may not match. Distances should not have this problem. assert_array_almost_equal(dist1, dist2) for (metric, kwargs) in METRICS.items(): for k in (1, 3, 5): for dualtree in (True, False): for breadth_first in (True, False): yield (check_neighbors, dualtree, breadth_first, k, metric, kwargs) def test_ball_tree_query_boolean_metrics(): np.random.seed(0) X = np.random.random((40, 10)).round(0) Y = np.random.random((10, 10)).round(0) k = 5 def check_neighbors(metric): bt = BallTree(X, leaf_size=1, metric=metric) dist1, ind1 = bt.query(Y, k) dist2, ind2 = brute_force_neighbors(X, Y, k, metric) assert_array_almost_equal(dist1, dist2) for metric in BOOLEAN_METRICS: yield check_neighbors, metric def test_ball_tree_query_discrete_metrics(): np.random.seed(0) X = (4 * np.random.random((40, 10))).round(0) Y = (4 * np.random.random((10, 10))).round(0) k = 5 def check_neighbors(metric): bt = BallTree(X, leaf_size=1, metric=metric) dist1, ind1 = bt.query(Y, k) dist2, ind2 = brute_force_neighbors(X, Y, k, metric) assert_array_almost_equal(dist1, dist2) for metric in DISCRETE_METRICS: yield check_neighbors, metric def test_ball_tree_query_radius(n_samples=100, n_features=10): np.random.seed(0) X = 2 * np.random.random(size=(n_samples, n_features)) - 1 query_pt = np.zeros(n_features, dtype=float) eps = 1E-15 # roundoff error can cause test to fail bt = BallTree(X, leaf_size=5) rad = np.sqrt(((X - query_pt) ** 2).sum(1)) for r in np.linspace(rad[0], rad[-1], 100): ind = bt.query_radius([query_pt], r + eps)[0] i = np.where(rad <= r + eps)[0] ind.sort() i.sort() assert_array_almost_equal(i, ind) def test_ball_tree_query_radius_distance(n_samples=100, n_features=10): np.random.seed(0) X = 2 * np.random.random(size=(n_samples, n_features)) - 1 query_pt = np.zeros(n_features, dtype=float) eps = 1E-15 # roundoff error can cause test to fail bt = BallTree(X, leaf_size=5) rad = np.sqrt(((X - query_pt) ** 2).sum(1)) for r in np.linspace(rad[0], rad[-1], 100): ind, dist = 
bt.query_radius([query_pt], r + eps, return_distance=True) ind = ind[0] dist = dist[0] d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1)) assert_array_almost_equal(d, dist) def compute_kernel_slow(Y, X, kernel, h): d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(h, X.shape[1], kernel) if kernel == 'gaussian': return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) elif kernel == 'tophat': return norm * (d < h).sum(-1) elif kernel == 'epanechnikov': return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) elif kernel == 'exponential': return norm * (np.exp(-d / h)).sum(-1) elif kernel == 'linear': return norm * ((1 - d / h) * (d < h)).sum(-1) elif kernel == 'cosine': return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) else: raise ValueError('kernel not recognized') def test_ball_tree_kde(n_samples=100, n_features=3): np.random.seed(0) X = np.random.random((n_samples, n_features)) Y = np.random.random((n_samples, n_features)) bt = BallTree(X, leaf_size=10) for kernel in ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']: for h in [0.01, 0.1, 1]: dens_true = compute_kernel_slow(Y, X, kernel, h) def check_results(kernel, h, atol, rtol, breadth_first): dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first) assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-7)) for rtol in [0, 1E-5]: for atol in [1E-6, 1E-2]: for breadth_first in (True, False): yield (check_results, kernel, h, atol, rtol, breadth_first) def test_gaussian_kde(n_samples=1000): # Compare gaussian KDE results to scipy.stats.gaussian_kde from scipy.stats import gaussian_kde np.random.seed(0) x_in = np.random.normal(0, 1, n_samples) x_out = np.linspace(-5, 5, 30) for h in [0.01, 0.1, 1]: bt = BallTree(x_in[:, None]) try: gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in)) except TypeError: raise SkipTest("Old version of scipy, doesn't accept " "explicit bandwidth.") dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples dens_gkde = gkde.evaluate(x_out) assert_array_almost_equal(dens_bt, dens_gkde, decimal=3) def test_ball_tree_two_point(n_samples=100, n_features=3): np.random.seed(0) X = np.random.random((n_samples, n_features)) Y = np.random.random((n_samples, n_features)) r = np.linspace(0, 1, 10) bt = BallTree(X, leaf_size=10) D = DistanceMetric.get_metric("euclidean").pairwise(Y, X) counts_true = [(D <= ri).sum() for ri in r] def check_two_point(r, dualtree): counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree) assert_array_almost_equal(counts, counts_true) for dualtree in (True, False): yield check_two_point, r, dualtree def test_ball_tree_pickle(): np.random.seed(0) X = np.random.random((10, 3)) bt1 = BallTree(X, leaf_size=1) # Test if BallTree with callable metric is picklable bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2) ind1, dist1 = bt1.query(X) ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X) def check_pickle_protocol(protocol): s = pickle.dumps(bt1, protocol=protocol) bt2 = pickle.loads(s) s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol) bt2_pyfunc = pickle.loads(s_pyfunc) ind2, dist2 = bt2.query(X) ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X) assert_array_almost_equal(ind1, ind2) assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc) assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc) for protocol in (0, 1, 2): yield check_pickle_protocol, protocol def test_neighbors_heap(n_pts=5, n_nbrs=10): heap = NeighborsHeap(n_pts, n_nbrs) for row in 
range(n_pts): d_in = np.random.random(2 * n_nbrs).astype(DTYPE) i_in = np.arange(2 * n_nbrs, dtype=ITYPE) for d, i in zip(d_in, i_in): heap.push(row, d, i) ind = np.argsort(d_in) d_in = d_in[ind] i_in = i_in[ind] d_heap, i_heap = heap.get_arrays(sort=True) assert_array_almost_equal(d_in[:n_nbrs], d_heap[row]) assert_array_almost_equal(i_in[:n_nbrs], i_heap[row]) def test_node_heap(n_nodes=50): vals = np.random.random(n_nodes).astype(DTYPE) i1 = np.argsort(vals) vals2, i2 = nodeheap_sort(vals) assert_array_almost_equal(i1, i2) assert_array_almost_equal(vals[i1], vals2) def test_simultaneous_sort(n_rows=10, n_pts=201): dist = np.random.random((n_rows, n_pts)).astype(DTYPE) ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE) dist2 = dist.copy() ind2 = ind.copy() # simultaneous sort rows using function simultaneous_sort(dist, ind) # simultaneous sort rows using numpy i = np.argsort(dist2, axis=1) row_ind = np.arange(n_rows)[:, None] dist2 = dist2[row_ind, i] ind2 = ind2[row_ind, i] assert_array_almost_equal(dist, dist2) assert_array_almost_equal(ind, ind2) def test_query_haversine(): np.random.seed(0) X = 2 * np.pi * np.random.random((40, 2)) bt = BallTree(X, leaf_size=1, metric='haversine') dist1, ind1 = bt.query(X, k=5) dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine') assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1, ind2)
bsd-3-clause
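The tests above all reduce to one pattern: query the ball tree, then verify against a brute-force pairwise-distance computation. A minimal, self-contained sketch of that cross-check using the public scikit-learn API (note that DistanceMetric moved to sklearn.metrics in recent releases, so the import below is version-dependent):

import numpy as np
from sklearn.neighbors import BallTree, DistanceMetric

rng = np.random.RandomState(0)
X = rng.random_sample((40, 3))   # reference points
Y = rng.random_sample((10, 3))   # query points
k = 3

# Tree-based k-nearest-neighbor search.
tree = BallTree(X, leaf_size=1, metric='euclidean')
dist_tree, ind_tree = tree.query(Y, k=k)

# Brute force: full pairwise distance matrix, keep the k smallest per row.
D = DistanceMetric.get_metric('euclidean').pairwise(Y, X)
ind_brute = np.argsort(D, axis=1)[:, :k]
dist_brute = D[np.arange(Y.shape[0])[:, None], ind_brute]

# Distances must agree; indices may legitimately differ when distances tie.
np.testing.assert_allclose(dist_tree, dist_brute)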
zhujianwei31415/dcnnfold
scripts/utility/pydendroheatmap.py
2
17660
# # The MIT License (MIT) # # Copyright (c) 2015 Matthew Antalek Jr # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # github: https://github.com/themantalope/pydendroheatmap import matplotlib.pyplot as pylab import matplotlib as mpl import scipy.cluster.hierarchy as sch import numpy as np class DendroHeatMap(object): """ Class for quickly and easily plotting heatmaps with dendrograms on the side, as seen in http://code.activestate.com/recipes/578175-hierarchical-clustering-heatmap-python/ """ def __init__(self, heat_map_data=None, left_dendrogram=None,top_dendrogram=None, window_height=10, window_width = 14, color_bar_width = 0.015, left_dendro_x=0.05,left_dendro_y=0.22,left_dendro_width=0.2,left_dendro_height=0.6, left_dendro_x_distance_to_row_cb=0.004, left_dendro_y_distance_to_col_cb=0.004, top_dendro_x=0.273, top_dendro_y=0.843, top_dendro_width=0.5, top_dendro_height=0.117, row_cb_x=0.254,row_cb_y=0.22,row_cb_width=0.015,row_cb_height=0.6,row_cb_on=True, col_cb_x = 0.273, col_cb_y=0.824, col_cb_width=0.5, col_cb_height=0.015, col_cb_on=True, heat_x=0.273, heat_y=0.22,heat_width=0.5,heat_height=0.6, color_legend_x=0.07,color_legend_y=0.88, color_legend_width=0.2,color_legend_height=0.09, color_legend_ticks=7, row_labels=None, max_row_labels=150, row_labels_size=8, col_labels=None, max_col_labels=100, col_labels_size=8, verbose=False): self.figure = None self.verbose= verbose # print 'should be moving into setter land....' 
self.heat_map_data = heat_map_data self.top_dendrogram = top_dendrogram self.left_dendrogram = left_dendrogram #set the default behaviors self.window_height=window_height self.window_width=window_width self.color_bar_width=color_bar_width self.left_dendro_x=left_dendro_x self.left_dendro_y=left_dendro_y self.left_dendro_width=left_dendro_width self.left_dendro_height=left_dendro_height self.left_dendro_x_distance_to_row_cb=left_dendro_x_distance_to_row_cb self.left_dendro_y_distance_to_col_cb=left_dendro_y_distance_to_col_cb self.top_dendro_x=top_dendro_x self.top_dendro_y=top_dendro_y self.top_dendro_width = top_dendro_width self.top_dendro_height=top_dendro_height self.cluster_cb_colors = mpl.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm']) self.row_cb_x=row_cb_x self.row_cb_y = row_cb_y self.row_cb_width=row_cb_width self.row_cb_height=row_cb_height self.row_cb_on=row_cb_on self.col_cb_x=col_cb_x self.col_cb_y=col_cb_y self.col_cb_width=col_cb_width self.col_cb_height=col_cb_height self.col_cb_on=col_cb_on self.heat_x=heat_x self.heat_y=heat_y self.heat_width=heat_width self.heat_height=heat_height self.color_legend_x=color_legend_x self.color_legend_y=color_legend_y self.color_legend_width=color_legend_width self.color_legend_height=color_legend_height self.color_legend_ticks = color_legend_ticks self.row_labels=row_labels self.row_labels_size=row_labels_size self.max_row_labels=max_row_labels self.col_labels=col_labels self.col_labels_size=col_labels_size self.max_col_labels=max_col_labels self.redBlackBlue=self.__RedBlackBlue() self.redBlackSkyBlue=self.__RedBlackSkyBlue() self.redBlackGreen=self.__RedBlackGreen() self.yellowBlackBlue=self.__YellowBlackBlue() self.colormap=self.redBlackGreen self.left_dendro_title = '' self.top_dendro_title = '' self.title = '' self.color_legend_title = '' self.plotRendered = False self.exportDPI = 100 def render_plot(self,showFrames=False): self.resetPlot() if(self.verbose): print 'Rendering plot...' 
self.figure = pylab.figure(figsize=[self.window_width, self.window_height]) #plot the top dendrogram if(not self.top_dendrogram is None): self.top_dendro_axes = self.figure.add_axes([self.top_dendro_x, self.top_dendro_y, self.top_dendro_width, self.top_dendro_height], frame_on=showFrames) self.top_dendro_plot = sch.dendrogram(self.top_dendrogram) self.top_dendro_axes.set_xticks([]) self.top_dendro_axes.set_yticks([]) self.top_dendro_axes.set_title(self.top_dendro_title) #plot the left dendrogram if(not self.left_dendrogram is None): self.left_dendro_axes = self.figure.add_axes([self.left_dendro_x, self.left_dendro_y, self.left_dendro_width, self.left_dendro_height], frame_on=showFrames) self.left_dendro_plot = sch.dendrogram(self.left_dendrogram,orientation='left') self.left_dendro_axes.set_xticks([]) self.left_dendro_axes.set_yticks([]) self.left_dendro_axes.set_title(self.left_dendro_title,rotation='vertical') #plot the heat map if(not self.heat_map_data is None): self.heat_map_axes = self.figure.add_axes([self.heat_x, self.heat_y, self.heat_width, self.heat_height], frame_on=showFrames) self.heat_map_plot = self.heat_map_axes.matshow(self.heat_map_data, aspect='auto', origin='lower', cmap=self.colormap, norm=self.cmap_norm) self.heat_map_axes.set_xticks([]) self.heat_map_axes.set_yticks([]) self.heat_map_rows = self.heat_map_data.shape[0] self.heat_map_cols = self.heat_map_data.shape[1] #add the from the labels to the figure # print len(self.row_labels) for i in range(0, self.heat_map_rows): if(self.row_labels): if(len(self.row_labels) < self.max_row_labels): self.heat_map_axes.text(self.heat_map_cols-0.5, i-0.5, ' '+self.row_labels[i], size=self.row_labels_size) for i in range(0, self.heat_map_cols): if(self.col_labels): if(len(self.col_labels) < self.max_col_labels): self.heat_map_axes.text(i+0.05, self.heat_map_rows-self.heat_map_rows-0.5, ' '+self.col_labels[i], size=self.col_labels_size, rotation=270,verticalalignment='top') #plot the column colorbar if(not self.top_dendrogram is None): self.col_cb_axes = self.figure.add_axes([self.col_cb_x, self.col_cb_y, self.col_cb_width, self.col_cb_height], frame_on=True) # print self.top_colorbar_labels.shape # print 'Col cb' # print [self.col_cb_x, self.col_cb_y, self.col_cb_width, self.col_cb_height] self.col_cb_plot = self.col_cb_axes.matshow(self.top_colorbar_labels,aspect='auto',origin='lower',cmap=self.cluster_cb_colors) self.col_cb_axes.set_xticks([]) self.col_cb_axes.set_yticks([]) #plot the row colorbar if(not self.left_dendrogram is None): self.row_cb_axes = self.figure.add_axes([self.row_cb_x, self.row_cb_y, self.row_cb_width, self.row_cb_height], frame_on=True) # print self.left_colorbar_labels.shape # print 'Row cb' # print [self.row_cb_x, self.row_cb_y, self.row_cb_width, self.row_cb_height] self.row_cb_plot = self.row_cb_axes.matshow(self.left_colorbar_labels, aspect='auto',origin='lower',cmap=self.cluster_cb_colors) self.row_cb_axes.set_xticks([]) self.row_cb_axes.set_yticks([]) #plot the color legend if(not self.heat_map_data is None): self.color_legend_axes = self.figure.add_axes([self.color_legend_x, self.color_legend_y, self.color_legend_width, self.color_legend_height], frame_on=showFrames) self.color_legend_plot = mpl.colorbar.ColorbarBase(self.color_legend_axes, cmap=self.colormap, norm=self.cmap_norm,orientation='horizontal') tl=mpl.ticker.MaxNLocator(nbins=self.color_legend_ticks) self.color_legend_plot.locator = tl self.color_legend_plot.update_ticks() self.color_legend_axes.set_title(self.color_legend_title) 
self.heat_map_axes.format_coord = self.__formatCoords self.figure.suptitle(self.title) self.plotRendered = True if(self.verbose): print( 'Plot rendered...') def show(self): self.resetPlot() self.render_plot() pylab.show() def export(self,filename): self.resetPlot() if('.' not in filename): filename += '.png' else: if(self.verbose): print ('Saving plot to: ', filename) self.render_plot() pylab.savefig(filename,dpi=self.exportDPI) @property def heat_map_data(self): return self.__heat_map_data @heat_map_data.setter def heat_map_data(self, heat_map_data): # print 'In the setter...' self.__heat_map_data=heat_map_data self.resetPlot() # print type(heat_map_data) if((isinstance(heat_map_data,np.ndarray)) | (isinstance(heat_map_data,np.matrix))): hm_min = heat_map_data.min() hm_max = heat_map_data.max() self.cmap_norm = mpl.colors.Normalize(hm_min,hm_max) else: raise TypeError('Data for the heatmap must be a numpy.ndarray or numpy.matrix object!') def resetPlot(self): self.plotRendered = False if(self.figure): pylab.close(self.figure) self.figure = None else: self.figure = None @property def figure(self): return self.__figure @figure.setter def figure(self,figure): self.__figure = figure if((not isinstance(figure, pylab.Figure)) & (isinstance(figure,object))): #this force's the figure to either be "None" type or a pylab.Figure object self.__figure = None @property def row_labels(self): return self.__row_labels @row_labels.setter def row_labels(self, row_labels): if(not isinstance(self.heat_map_data,np.ndarray) or not isinstance(self.heat_map_data, np.matrix)): if(self.verbose): print ("""Warning: data for heat map not yet specified, be sure that the number of elements in row_labels is equal to the number of rows in heat_map_data. """) self.__row_labels = row_labels else: if(len(row_labels) != self.heat_map_data.shape[0]): print ("""Invalid entry for row_labels. Please be sure that the number of elements in row_labels is equal to the number of rows in heat_map_data.""") self.__row_labels = None else: self.__row_labels = row_labels @property def col_labels(self): return self.__col_labels @col_labels.setter def col_labels(self, col_labels): if(not isinstance(self.heat_map_data,np.ndarray) or not isinstance(self.heat_map_data, np.matrix)): if(self.verbose): print ("""Warning: data for heat map not yet specified, be sure that the number of elements in col_labels is equal to the number of columns in heat_map_data. """) self.__col_labels = col_labels else: if(len(col_labels) != self.heat_map_data.shape[0]): print ("""Invalid entry for col_labels. 
Please be sure that the number of elements in col_labels is equal to the number of columns in heat_map_data.""") self.__col_labels = None else: self.__col_labels = col_labels @property def colormap(self): return self.__colormap @colormap.setter def colormap(self, colormap): self.__colormap = colormap self.resetPlot() @property def top_dendrogram(self): return self.__top_dendrogram @top_dendrogram.setter def top_dendrogram(self,top_dendrogram): if(isinstance(top_dendrogram,np.ndarray)): self.__top_dendrogram = top_dendrogram self.resetPlot() self.top_colorbar_labels = np.array(sch.fcluster(top_dendrogram,0.7*max(top_dendrogram[:,2]),'distance'),dtype=int) self.top_colorbar_labels.shape = (1,len(self.top_colorbar_labels)) temp_dendro = sch.dendrogram(top_dendrogram,no_plot=True) self.top_colorbar_labels = self.top_colorbar_labels[:,temp_dendro['leaves']] elif top_dendrogram is None: self.__top_dendrogram = top_dendrogram self.resetPlot() else: raise TypeError('Dendrograms must be a n-1 x 4 numpy.ndarray as per the scipy.cluster.hierarchy implementation!') @property def left_dendrogram(self): return self.__left_dendrogram @left_dendrogram.setter def left_dendrogram(self,left_dendrogram): if isinstance(left_dendrogram,np.ndarray): self.__left_dendrogram = left_dendrogram self.resetPlot() self.left_colorbar_labels = np.array(sch.fcluster(left_dendrogram,0.7 * max(left_dendrogram[:,2]),'distance'), dtype=int) self.left_colorbar_labels.shape = (len(self.left_colorbar_labels),1) temp_dendro = sch.dendrogram(left_dendrogram,no_plot=True) self.left_colorbar_labels = self.left_colorbar_labels[temp_dendro['leaves'],:] elif left_dendrogram is None: self.__left_dendrogram = left_dendrogram self.resetPlot() else: raise TypeError('Dendrograms must be a n-1 x 4 numpy.ndarray as per the scipy.cluster.hierarchy implementation!') def __RedBlackSkyBlue(self): cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.9), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)) } my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256) return my_cmap def __RedBlackBlue(self): cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)) } my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256) return my_cmap def __RedBlackGreen(self): cdict = {'red': ((0.0, 0.0, 0.0), (0.05, 0.0, 0.1), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), 'green': ((0.0, 0.0, 1.0), (0.25, 0.1, 0.0), (1.0, 0.0, 0.0)) } my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256) return my_cmap def __YellowBlackBlue(self): cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.8), (0.5, 0.1, 0.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)) } ### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple ### modulate between blue and cyan using the last y var in the first green tuple my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256) return my_cmap def __formatCoords(self, x,y): col = int(x+0.5) row = int(y+0.5) if col>=0 and col<self.heat_map_cols and row>=0 and row<self.heat_map_rows: z = self.heat_map_data[row,col] return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z) else: return 'x=%1.4f, y=%1.4f'%(x, y)
gpl-3.0
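A usage sketch for the DendroHeatMap class above, under two stated assumptions: the module is importable as pydendroheatmap, and it is run under Python 2 or after its bare print statements are converted (the file mixes print statements with print() calls). The class draws heat_map_data exactly as passed, so the matrix is reordered to the dendrogram leaf order first:

import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
from pydendroheatmap import DendroHeatMap  # assumed import path

data = np.random.rand(30, 12)              # rows x columns to cluster
row_link = sch.linkage(dist.pdist(data), method='average')
col_link = sch.linkage(dist.pdist(data.T), method='average')

# Reorder rows/columns to match the dendrogram leaves before plotting.
row_order = sch.dendrogram(row_link, no_plot=True)['leaves']
col_order = sch.dendrogram(col_link, no_plot=True)['leaves']
ordered = data[np.ix_(row_order, col_order)]

hm = DendroHeatMap(heat_map_data=ordered,
                   left_dendrogram=row_link,
                   top_dendrogram=col_link)
hm.title = 'Toy clustered heat map'
hm.show()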
Gorbagzog/StageIAP
Behroozi18SMF.py
1
4893
#!/usr/bin/env python3 # -*-coding:Utf-8 -* """Script to compute the SMF from Behroozi et al. 2013 and Behroozi et al. 2018""" import numpy as np import matplotlib.pyplot as plt from colossus.cosmology import cosmology from colossus.lss import mass_function from MCMC_SHMR_main import * """Load HMF""" # redshiftsbin = np.array([0.37, 0.668, 0.938, 1.286, 1.735, 2.220, 2.683, 3.271, 3.926, 4.803]) global redshiftsbin redshiftsbin = np.array([0.1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) numzbin = redshiftsbin.size hmf=[] mdef='200m' hmf_name = 'tinker08' cosmo = cosmology.setCosmology('planck15') redshift_haloes = redshiftsbin """Use the Colossus module for the HMF""" print('Use '+hmf_name+' HMF in Planck15 cosmo from Colossus module') if hmf_name == ('watson13' or 'bhattacharya11'): print(hmf_name) mdef='fof' else: mdef = '200m' print('Use '+mdef+' for the SO defintion.') redshift_haloes = redshiftsbin log_Mh = np.linspace(10, 15, num=1000) M = 10**log_Mh * cosmo.h for i in range(numzbin): hmf.append( np.transpose( np.array( [np.log10(M / cosmo.h), np.log10(mass_function.massFunction( M, redshift_haloes[i], mdef = mdef, model =hmf_name, q_out = 'dndlnM' ) * np.log(10) * cosmo.h**3 ## Mass functions are in h^3 Mpc^-3, and need to multiply by ln(10) to have dndlog10m )] ) ) ) """Behroozi 2018""" """Parameters""" paramfile = '../Data/umachine-edr/data/smhm/params/smhm_med_params.txt' # -> the one showed on the plots of B18 # paramfile = '../Data/umachine-edr/data/smhm/params/smhm_med_params.txt' # Load params param_file = open(paramfile, "r") param_list = [] allparams = [] for line in param_file: param_list.append(float((line.split(" "))[1])) allparams.append(line.split(" ")) if (len(param_list) != 20): print("Parameter file not correct length. (Expected 20 lines, got %d)." % len(param_list)) quit() names = "EFF_0 EFF_0_A EFF_0_A2 EFF_0_Z M_1 M_1_A M_1_A2 M_1_Z ALPHA ALPHA_A ALPHA_A2 ALPHA_Z BETA BETA_A BETA_Z DELTA GAMMA GAMMA_A GAMMA_Z CHI2".split(" "); params18 = dict(zip(names, param_list)) """Functions""" def af(z): return 1 / (1+z) def log_M1(a, z): return params18['M_1'] + params18['M_1_A'] * (a - 1.) - params18['M_1_A2'] * np.log(a) + params18['M_1_Z'] * z def eps(a, z): return params18['EFF_0'] + params18['EFF_0_A'] * (a - 1.) - params18['EFF_0_A2'] * np.log(a) + params18['EFF_0_Z'] * z def alpha(a, z): return params18['ALPHA'] + params18['ALPHA_A'] * (a - 1.) - params18['ALPHA_A2'] * np.log(a) + params18['ALPHA_Z'] * z def beta(a, z): return params18['BETA'] + params18['BETA_A'] * (a - 1.) + params18['BETA_Z'] * z def delta(): return params18['DELTA'] def log_gamma(a, z): return params18['GAMMA'] + params18['GAMMA_A'] * (a - 1.) + params18['GAMMA_Z'] * z def log_Ms18(log_Mh, z): a = af(z) print(log_M1(a, z)) x = log_Mh - log_M1(a, z) return log_M1(a, z) + eps(a, z) - np.log10(10**(-alpha(a, z) * x) + 10**(-beta(a, z) * x)) + 10**(log_gamma(a, z)) * np.exp(-0.5 * (x / delta())**2) def log_phi18(log_Mh, idx_z, z, params): epsilon = 0.001 log_Ms1 = log_Ms18(log_Mh, z) log_Ms2 = log_Ms18(log_Mh + epsilon, z) # Select the index of the HMF corresponding to the halo masses index_Mh = np.argmin( np.abs( np.tile(hmf[idx_z][:, 0], (len(log_Mh), 1)) - np.transpose(np.tile(log_Mh, (len(hmf[idx_z][:, 0]), 1))) ), axis=1) # if np.any(hmf[idx_z][index_Mh, 1] < -100): # # print('HMF not defined') # return log_Ms1 * 0. 
+ 1 # else: log_phidirect = hmf[idx_z][index_Mh, 1] - np.log10((log_Ms2 - log_Ms1)/epsilon) return log_phidirect def log_phi18_true(log_Mh, idx_z, z, params): epsilon = 0.0001 * log_Mh ksi=0.1 logphi1 =log_phi18(log_Mh, idx_z, z, params) logphi2 = log_phi18(log_Mh + epsilon, idx_z, z, params) logphitrue = logphi1 + ksi**2 / 2 * np.log(10) * ((logphi2 - logphi1)/epsilon)**2 return logphitrue """Compute log_Ms and phi corresponding to a given log_Mh and HMF""" # log_Mh = np.linspace(10, 15, num=1000) # for idx_z in range(redshiftsbin.size): # # for idx_z in range(1): # z = redshiftsbin[idx_z] # log_phi = log_phi18(log_Mh, idx_z, z, params18) # # print(log_phi) # plt.plot(log_Ms18(log_Mh, z), log_phi, label='z='+str(z)) # # plt.plot(log_Mh, log_Ms18(log_Mh, z)) # plt.ylim(-7, 0) # plt.legend() # plt.show() z = np.linspace(0, 10) a= af(z) plt.figure() plt.plot(z, log_M1(a, z) ) plt.show() plt.figure() plt.plot(z, eps(a, z) ) plt.show() plt.figure() plt.plot(z, alpha(a, z) ) plt.show() plt.figure() plt.plot(z, beta(a, z) ) plt.show() plt.figure() plt.plot(z, log_gamma(a, z) ) plt.show()
gpl-3.0
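A condensed sketch of the halo-mass-function step the script builds on: evaluate a Tinker08 mass function with Colossus in the Planck15 cosmology and convert dn/dlnM (in h units) to dn/dlog10M in Mpc^-3, exactly as done in the loop above. The redshift and mass grid here are arbitrary.

import numpy as np
from colossus.cosmology import cosmology
from colossus.lss import mass_function

cosmo = cosmology.setCosmology('planck15')
z = 1.0
log_Mh = np.linspace(10, 15, 1000)            # log10(Mhalo / Msun)
M = 10**log_Mh * cosmo.h                      # Colossus expects Msun/h

dndlnM = mass_function.massFunction(M, z, mdef='200m', model='tinker08',
                                    q_out='dndlnM')         # (Mpc/h)^-3
dndlog10M = dndlnM * np.log(10) * cosmo.h**3                 # Mpc^-3 dex^-1
log_phi = np.log10(dndlog10M)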
PHOTOX/fuase
ase/ase/dft/stm.py
1
6010
import pickle import numpy as np class STM: def __init__(self, atoms, symmetries=None): """Scanning tunneling microscope. atoms: Atoms object or filename Atoms to scan or name of file to read LDOS from. symmetries: list of int List of integers 0, 1, and/or 2 indicating which surface symmetries have been used to reduce the number of k-points for the DFT calculation. The three integers correspond to the following three symmetry operations:: [-1 0] [ 1 0] [ 0 1] [ 0 1] [ 0 -1] [ 1 0] """ if isinstance(atoms, str): with open(atoms, 'rb') as f: self.ldos, self.bias, self.cell = pickle.load(f) self.atoms = None else: self.atoms = atoms self.cell = atoms.cell self.bias = None self.ldos = None assert not self.cell[2, :2].any() and not self.cell[:2, 2].any() self.symmetries = symmetries or [] def calculate_ldos(self, bias): """Calculate local density of states for given bias.""" if self.ldos is not None and bias == self.bias: return if bias < 0: emin = bias emax = 0.0 else: emin = 0 emax = bias calc = self.atoms.calc nbands = calc.get_number_of_bands() weights = calc.get_k_point_weights() nkpts = len(weights) nspins = calc.get_number_of_spins() eigs = np.array([[calc.get_eigenvalues(k, s) for k in range(nkpts)] for s in range(nspins)]) eigs -= calc.get_fermi_level() ldos = 0.0 for s in range(nspins): for k in range(nkpts): for n in range(nbands): e = eigs[s, k, n] if emin < e < emax: psi = calc.get_pseudo_wave_function(n, k, s) ldos += weights[k] * (psi * np.conj(psi)).real if 0 in self.symmetries: # (x,y) -> (-x,y) ldos[1:] += ldos[:0:-1].copy() ldos[1:] *= 0.5 if 1 in self.symmetries: # (x,y) -> (x,-y) ldos[:, 1:] += ldos[:, :0:-1].copy() ldos[:, 1:] *= 0.5 if 2 in self.symmetries: # (x,y) -> (y,x) ldos += ldos.transpose((1, 0, 2)).copy() ldos *= 0.5 self.ldos = ldos self.bias = bias def write(self, filename='stm.pckl'): """Write local density of states to pickle file.""" with open(filename, 'wb') as f: pickle.dump((self.ldos, self.bias, self.cell), f, protocol=pickle.HIGHEST_PROTOCOL) def get_averaged_current(self, bias, z): """Calculate avarage current at height z. Use this to get an idea of what current to use when scanning.""" self.calculate_ldos(bias) nz = self.ldos.shape[2] # Find grid point: n = z / self.cell[2, 2] * nz dn = n - np.floor(n) n = int(n) % nz # Average and do linear interpolation: return ((1 - dn) * self.ldos[:, :, n].mean() + dn * self.ldos[:, :, (n + 1) % nz].mean()) def scan(self, bias, current, z0=None, repeat=(1, 1)): """Constant current 2-d scan. Returns three 2-d arrays (x, y, z) containing x-coordinates, y-coordinates and heights. These three arrays can be passed to matplotlibs contourf() function like this: >>> import matplotlib.pyplot as plt >>> plt.gca(aspect='equal') >>> plt.contourf(x, y, z) >>> plt.show() """ self.calculate_ldos(bias) L = self.cell[2, 2] nz = self.ldos.shape[2] h = L / nz ldos = self.ldos.reshape((-1, nz)) heights = np.empty(ldos.shape[0]) for i, a in enumerate(ldos): heights[i] = find_height(a, current, h, z0) s0 = heights.shape = self.ldos.shape[:2] heights = np.tile(heights, repeat) s = heights.shape ij = np.indices(s, dtype=float).reshape((2, -1)).T x, y = np.dot(ij / s0, self.cell[:2, :2]).T.reshape((2,) + s) return x, y, heights def linescan(self, bias, current, p1, p2, npoints=50, z0=None): """Constant current line scan. Example:: stm = STM(...) z = ... 
# tip position c = stm.get_averaged_current(-1.0, z) stm.linescan(-1.0, c, (1.2, 0.0), (1.2, 3.0)) """ heights = self.scan(bias, current, z0)[2] p1 = np.asarray(p1, float) p2 = np.asarray(p2, float) d = p2 - p1 s = np.dot(d, d)**0.5 cell = self.cell[:2, :2] shape = np.array(heights.shape, float) M = np.linalg.inv(cell) line = np.empty(npoints) for i in range(npoints): p = p1 + i * d / (npoints - 1) q = np.dot(p, M) * shape line[i] = interpolate(q, heights) return np.linspace(0, s, npoints), line def interpolate(q, heights): qi = q.astype(int) f = q - qi g = 1 - f qi %= heights.shape n0, m0 = qi n1, m1 = (qi + 1) % heights.shape z = (g[0] * g[1] * heights[n0, m0] + f[0] * g[1] * heights[n1, m0] + g[0] * f[1] * heights[n0, m1] + f[0] * f[1] * heights[n1, m1]) return z def find_height(ldos, current, h, z0=None): if z0 is None: n = len(ldos) - 1 else: n = int(z0 / h) while n >= 0: if ldos[n] > current: break n -= 1 else: raise RuntimeError('Tip crash!') c2, c1 = ldos[n:n + 2] return (n + 1 - (current - c1) / (c2 - c1)) * h
gpl-2.0
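An illustrative sketch of how the STM class above is typically driven. Assumptions: a finished GPAW ground-state calculation stored in a restart file named 'al100.gpw' (the name is purely hypothetical); any ASE calculator exposing get_pseudo_wave_function() would work the same way.

import matplotlib.pyplot as plt
from ase.dft.stm import STM
from gpaw import GPAW  # assumption: GPAW is installed

calc = GPAW('al100.gpw', txt=None)   # load the hypothetical finished run
atoms = calc.get_atoms()

stm = STM(atoms)
bias = 1.0    # V
z = 8.0       # tip height (Angstrom) used to pick a target current
current = stm.get_averaged_current(bias, z)

# Constant-current scan, tiled 3x3 for a larger picture.
x, y, heights = stm.scan(bias, current, repeat=(3, 3))
plt.gca().set_aspect('equal')
plt.contourf(x, y, heights, 40)
plt.colorbar(label='tip height')
plt.savefig('stm_scan.png')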
mne-tools/mne-tools.github.io
0.22/_downloads/ae7d4d6bcae82f99a78c3f8a0c94f7b0/plot_mne_inverse_envelope_correlation.py
7
4203
""" .. _ex-envelope-correlation: ============================================= Compute envelope correlations in source space ============================================= Compute envelope correlations of orthogonalized activity :footcite:`HippEtAl2012,KhanEtAl2018` in source space using resting state CTF data. """ # Authors: Eric Larson <[email protected]> # Sheraz Khan <[email protected]> # Denis Engemann <[email protected]> # # License: BSD (3-clause) import os.path as op import numpy as np import matplotlib.pyplot as plt import mne from mne.connectivity import envelope_correlation from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs from mne.preprocessing import compute_proj_ecg, compute_proj_eog data_path = mne.datasets.brainstorm.bst_resting.data_path() subjects_dir = op.join(data_path, 'subjects') subject = 'bst_resting' trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif') src = op.join(subjects_dir, subject, 'bem', subject + '-oct-6-src.fif') bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif') raw_fname = op.join(data_path, 'MEG', 'bst_resting', 'subj002_spontaneous_20111102_01_AUX.ds') ############################################################################## # Here we do some things in the name of speed, such as crop (which will # hurt SNR) and downsample. Then we compute SSP projectors and apply them. raw = mne.io.read_raw_ctf(raw_fname, verbose='error') raw.crop(0, 60).pick_types(meg=True, eeg=False).load_data().resample(80) raw.apply_gradient_compensation(3) projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2) projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407') raw.info['projs'] += projs_ecg raw.info['projs'] += projs_eog raw.apply_proj() cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest ############################################################################## # Now we band-pass filter our data and create epochs. raw.filter(14, 30) events = mne.make_fixed_length_events(raw, duration=5.) epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5., baseline=None, reject=dict(mag=8e-13), preload=True) del raw ############################################################################## # Compute the forward and inverse # ------------------------------- src = mne.read_source_spaces(src) fwd = mne.make_forward_solution(epochs.info, trans, src, bem) inv = make_inverse_operator(epochs.info, fwd, cov) del fwd, src ############################################################################## # Compute label time series and do envelope correlation # ----------------------------------------------------- labels = mne.read_labels_from_annot(subject, 'aparc_sub', subjects_dir=subjects_dir) epochs.apply_hilbert() # faster to apply in sensor space stcs = apply_inverse_epochs(epochs, inv, lambda2=1. 
/ 9., pick_ori='normal', return_generator=True) label_ts = mne.extract_label_time_course( stcs, labels, inv['src'], return_generator=True) corr = envelope_correlation(label_ts, verbose=True) # let's plot this matrix fig, ax = plt.subplots(figsize=(4, 4)) ax.imshow(corr, cmap='viridis', clim=np.percentile(corr, [5, 95])) fig.tight_layout() ############################################################################## # Compute the degree and plot it # ------------------------------ # sphinx_gallery_thumbnail_number = 2 threshold_prop = 0.15 # percentage of strongest edges to keep in the graph degree = mne.connectivity.degree(corr, threshold_prop=threshold_prop) stc = mne.labels_to_stc(labels, degree) stc = stc.in_label(mne.Label(inv['src'][0]['vertno'], hemi='lh') + mne.Label(inv['src'][1]['vertno'], hemi='rh')) brain = stc.plot( clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot', subjects_dir=subjects_dir, views='dorsal', hemi='both', smoothing_steps=25, time_label='Beta band') ############################################################################## # References # ---------- # .. footbibliography::
bsd-3-clause
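A toy, data-free check of the envelope_correlation step used in the example above: two of four synthetic signals share a slow amplitude envelope on a beta-band carrier, so their orthogonalized envelope correlation should dominate the matrix. Shapes follow the (n_epochs, n_signals, n_times) convention; all numbers here are arbitrary.

import numpy as np
from scipy.signal import hilbert
from mne.connectivity import envelope_correlation

rng = np.random.RandomState(0)
n_epochs, n_signals, n_times = 5, 4, 2000
sfreq = 200.
t = np.arange(n_times) / sfreq

data = rng.randn(n_epochs, n_signals, n_times)
envelope = 1. + 0.8 * np.sin(2 * np.pi * 0.3 * t)               # shared slow envelope
data[:, 0] += 2 * envelope * np.sin(2 * np.pi * 20. * t)        # beta carrier
data[:, 1] += 2 * envelope * np.sin(2 * np.pi * 20. * t + 1.0)  # phase-shifted copy

analytic = hilbert(data, axis=-1)       # same role as epochs.apply_hilbert()
corr = envelope_correlation(analytic)   # (n_signals, n_signals), averaged over epochs
print(np.round(corr, 2))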
Jimmy-Morzaria/scikit-learn
benchmarks/bench_random_projections.py
397
8900
""" =========================== Random projection benchmark =========================== Benchmarks for random projections. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import collections import numpy as np import scipy.sparse as sp from sklearn import clone from sklearn.externals.six.moves import xrange from sklearn.random_projection import (SparseRandomProjection, GaussianRandomProjection, johnson_lindenstrauss_min_dim) def type_auto_or_float(val): if val == "auto": return "auto" else: return float(val) def type_auto_or_int(val): if val == "auto": return "auto" else: return int(val) def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_scikit_transformer(X, transfomer): gc.collect() clf = clone(transfomer) # start time t_start = datetime.now() clf.fit(X) delta = (datetime.now() - t_start) # stop time time_to_fit = compute_time(t_start, delta) # start time t_start = datetime.now() clf.transform(X) delta = (datetime.now() - t_start) # stop time time_to_transform = compute_time(t_start, delta) return time_to_fit, time_to_transform # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None): rng = np.random.RandomState(random_state) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def print_row(clf_type, time_fit, time_transform): print("%s | %s | %s" % (clf_type.ljust(30), ("%.4fs" % time_fit).center(12), ("%.4fs" % time_transform).center(12))) if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-features", dest="n_features", default=10 ** 4, type=int, help="Number of features in the benchmarks") op.add_option("--n-components", dest="n_components", default="auto", help="Size of the random subspace." " ('auto' or int > 0)") op.add_option("--ratio-nonzeros", dest="ratio_nonzeros", default=10 ** -3, type=float, help="Number of features in the benchmarks") op.add_option("--n-samples", dest="n_samples", default=500, type=int, help="Number of samples in the benchmarks") op.add_option("--random-seed", dest="random_seed", default=13, type=int, help="Seed used by the random number generators.") op.add_option("--density", dest="density", default=1 / 3, help="Density used by the sparse random projection." " ('auto' or float (0.0, 1.0]") op.add_option("--eps", dest="eps", default=0.5, type=float, help="See the documentation of the underlying transformers.") op.add_option("--transformers", dest="selected_transformers", default='GaussianRandomProjection,SparseRandomProjection', type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
Available: " "GaussianRandomProjection,SparseRandomProjection") op.add_option("--dense", dest="dense", default=False, action="store_true", help="Set input space as a dense matrix.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) opts.n_components = type_auto_or_int(opts.n_components) opts.density = type_auto_or_float(opts.density) selected_transformers = opts.selected_transformers.split(',') ########################################################################### # Generate dataset ########################################################################### n_nonzeros = int(opts.ratio_nonzeros * opts.n_features) print('Dataset statics') print("===========================") print('n_samples \t= %s' % opts.n_samples) print('n_features \t= %s' % opts.n_features) if opts.n_components == "auto": print('n_components \t= %s (auto)' % johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)) else: print('n_components \t= %s' % opts.n_components) print('n_elements \t= %s' % (opts.n_features * opts.n_samples)) print('n_nonzeros \t= %s per feature' % n_nonzeros) print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros) print('') ########################################################################### # Set transformer input ########################################################################### transformers = {} ########################################################################### # Set GaussianRandomProjection input gaussian_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed } transformers["GaussianRandomProjection"] = \ GaussianRandomProjection(**gaussian_matrix_params) ########################################################################### # Set SparseRandomProjection input sparse_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed, "density": opts.density, "eps": opts.eps, } transformers["SparseRandomProjection"] = \ SparseRandomProjection(**sparse_matrix_params) ########################################################################### # Perform benchmark ########################################################################### time_fit = collections.defaultdict(list) time_transform = collections.defaultdict(list) print('Benchmarks') print("===========================") print("Generate dataset benchmarks... ", end="") X_dense, X_sparse = make_sparse_random_data(opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed) X = X_dense if opts.dense else X_sparse print("done") for name in selected_transformers: print("Perform benchmarks for %s..." % name) for iteration in xrange(opts.n_times): print("\titer %s..." % iteration, end="") time_to_fit, time_to_transform = bench_scikit_transformer(X_dense, transformers[name]) time_fit[name].append(time_to_fit) time_transform[name].append(time_to_transform) print("done") print("") ########################################################################### # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Transformer performance:") print("===========================") print("Results are averaged over %s repetition(s)." 
% opts.n_times) print("") print("%s | %s | %s" % ("Transformer".ljust(30), "fit".center(12), "transform".center(12))) print(31 * "-" + ("|" + "-" * 14) * 2) for name in sorted(selected_transformers): print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name])) print("") print("")
bsd-3-clause
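What the benchmark above measures, in miniature: pick n_components from the Johnson-Lindenstrauss bound, project, and inspect how well squared pairwise distances are preserved. Matrix sizes are arbitrary and chosen only to run quickly.

import numpy as np
from sklearn.random_projection import (SparseRandomProjection,
                                       johnson_lindenstrauss_min_dim)
from sklearn.metrics.pairwise import euclidean_distances

rng = np.random.RandomState(42)
X = rng.randn(500, 10000)
eps = 0.5
n_components = johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=eps)

transformer = SparseRandomProjection(n_components=n_components,
                                     random_state=42)
X_proj = transformer.fit_transform(X)

# Ratio of projected to original squared distances should stay within ~1 +/- eps.
d_orig = euclidean_distances(X, squared=True).ravel()
d_proj = euclidean_distances(X_proj, squared=True).ravel()
nonzero = d_orig != 0
ratio = d_proj[nonzero] / d_orig[nonzero]
print('distance ratio: mean %.3f, min %.3f, max %.3f'
      % (ratio.mean(), ratio.min(), ratio.max()))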
vybstat/scikit-learn
sklearn/manifold/t_sne.py
52
34602
# Author: Alexander Fabisch -- <[email protected]> # Author: Christopher Moody <[email protected]> # Author: Nick Travers <[email protected]> # License: BSD 3 clause (C) 2014 # This is the exact and Barnes-Hut t-SNE implementation. There are other # modifications of the algorithm: # * Fast Optimization for t-SNE: # http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf import numpy as np from scipy import linalg import scipy.sparse as sp from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from ..neighbors import BallTree from ..base import BaseEstimator from ..utils import check_array from ..utils import check_random_state from ..utils.extmath import _ravel from ..decomposition import RandomizedPCA from ..metrics.pairwise import pairwise_distances from . import _utils from . import _barnes_hut_tsne from ..utils.fixes import astype MACHINE_EPSILON = np.finfo(np.double).eps def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = astype(distances, np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = astype(distances, np.float32, copy=False) neighbors = astype(neighbors, np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) m = "All probabilities should be finite" assert np.all(np.isfinite(conditional_P)), m P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) assert np.all(np.abs(P) <= 1.0) return P def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. 
P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution n = pdist(X_embedded, "sqeuclidean") n += 1. n /= degrees_of_freedom n **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(P / Q)) # Gradient: dC/dY grad = np.ndarray((n_samples, n_components)) PQd = squareform((P - Q) * n) for i in range(skip_num_points, n_samples): np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i]) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples, n_components): """t-SNE objective function: the absolute error of the KL divergence of p_ijs and q_ijs. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. neighbors : array (n_samples, K) The neighbors is not actually required to calculate the divergence, but is here to match the signature of the gradient function degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution n = pdist(X_embedded, "sqeuclidean") n += 1. n /= degrees_of_freedom n **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) if len(P.shape) == 2: P = squareform(P) kl_divergence = 2.0 * np.dot(P, np.log(P / Q)) return kl_divergence def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. neighbors: int64 array, shape (n_samples, K) Array with element [i, j] giving the index for the jth closest neighbor to point i. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. 
n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ params = astype(params, np.float32, copy=False) X_embedded = params.reshape(n_samples, n_components) neighbors = astype(neighbors, np.int64, copy=False) if len(P.shape) == 1: sP = squareform(P).astype(np.float32) else: sP = P.astype(np.float32) grad = np.zeros(X_embedded.shape, dtype=np.float32) error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors, grad, angle, n_components, verbose, dof=degrees_of_freedom) c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad = grad.ravel() grad *= c return error, grad def _gradient_descent(objective, p0, it, n_iter, objective_error=None, n_iter_check=1, n_iter_without_progress=50, momentum=0.5, learning_rate=1000.0, min_gain=0.01, min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. objective_error : function or callable Should return a tuple of cost and gradient for a given parameter vector. n_iter_without_progress : int, optional (default: 30) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.5) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 1000.0) The learning rate should be extremely high for t-SNE! Values in the range [100.0, 1000.0] are common. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. min_error_diff : float, optional (default: 1e-7) If the absolute difference of two successive cost function values is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. 
args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """ if args is None: args = [] if kwargs is None: kwargs = {} p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = 0 for i in range(it, n_iter): new_error, grad = objective(p, *args, **kwargs) grad_norm = linalg.norm(grad) inc = update * grad >= 0.0 dec = np.invert(inc) gains[inc] += 0.05 gains[dec] *= 0.95 np.clip(gains, min_gain, np.inf) grad *= gains update = momentum * update - learning_rate * grad p += update if (i + 1) % n_iter_check == 0: if new_error is None: new_error = objective_error(p, *args) error_diff = np.abs(new_error - error) error = new_error if verbose >= 2: m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f" print(m % (i + 1, error, grad_norm)) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if grad_norm <= min_grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break if error_diff <= min_error_diff: if verbose >= 2: m = "[t-SNE] Iteration %d: error difference %f. Finished." print(m % (i + 1, error_diff)) break if new_error is not None: error = new_error return p, error, i def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i (r(i, j) - k)} where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. 
""" if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t class TSNE(BaseEstimator): """t-distributed Stochastic Neighbor Embedding. t-SNE [1] is a tool to visualize high-dimensional data. It converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results. It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of features is very high. This will suppress some noise and speed up the computation of pairwise distances between samples. For more tips see Laurens van der Maaten's FAQ [2]. Read more in the :ref:`User Guide <t_sne>`. Parameters ---------- n_components : int, optional (default: 2) Dimension of the embedded space. perplexity : float, optional (default: 30) The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selcting a value between 5 and 50. The choice is not extremely critical since t-SNE is quite insensitive to this parameter. early_exaggeration : float, optional (default: 4.0) Controls how tight natural clusters in the original space are in the embedded space and how much space will be between them. For larger values, the space between natural clusters will be larger in the embedded space. Again, the choice of this parameter is not very critical. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. learning_rate : float, optional (default: 1000) The learning rate can be a critical parameter. It should be between 100 and 1000. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. If the cost function gets stuck in a bad local minimum increasing the learning rate helps sometimes. n_iter : int, optional (default: 1000) Maximum number of iterations for the optimization. Should be at least 200. n_iter_without_progress : int, optional (default: 30) Maximum number of iterations without progress before we abort the optimization. min_grad_norm : float, optional (default: 1E-7) If the gradient norm is below this threshold, the optimization will be aborted. metric : string or callable, optional The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. 
Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. The default is "euclidean" which is interpreted as squared euclidean distance. init : string, optional (default: "random") Initialization of embedding. Possible options are 'random' and 'pca'. PCA initialization cannot be used with precomputed distances and is usually more globally stable than random initialization. verbose : int, optional (default: 0) Verbosity level. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Note that different initializations might result in different local minima of the cost function. method : string (default: 'barnes_hut') By default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the slower, but exact, algorithm in O(N^2) time. The exact algorithm should be used when nearest-neighbor errors need to be better than 3%. However, the exact method cannot scale to millions of examples. angle : float (default: 0.5) Only used if method='barnes_hut' This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. Examples -------- >>> import numpy as np >>> from sklearn.manifold import TSNE >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = TSNE(n_components=2, random_state=0) >>> np.set_printoptions(suppress=True) >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([[ 0.00017599, 0.00003993], [ 0.00009891, 0.00021913], [ 0.00018554, -0.00009357], [ 0.00009528, -0.00001407]]) References ---------- [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms. Journal of Machine Learning Research 15(Oct):3221-3245, 2014. 
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf """ def __init__(self, n_components=2, perplexity=30.0, early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000, n_iter_without_progress=30, min_grad_norm=1e-7, metric="euclidean", init="random", verbose=0, random_state=None, method='barnes_hut', angle=0.5): if init not in ["pca", "random"] or isinstance(init, np.ndarray): msg = "'init' must be 'pca', 'random' or a NumPy array" raise ValueError(msg) self.n_components = n_components self.perplexity = perplexity self.early_exaggeration = early_exaggeration self.learning_rate = learning_rate self.n_iter = n_iter self.n_iter_without_progress = n_iter_without_progress self.min_grad_norm = min_grad_norm self.metric = metric self.init = init self.verbose = verbose self.random_state = random_state self.method = method self.angle = angle self.embedding_ = None def _fit(self, X, skip_num_points=0): """Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise using a dimensionality reduction technique (e.g. TrucnatedSVD). Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that this when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. """ if self.method not in ['barnes_hut', 'exact']: raise ValueError("'method' must be 'barnes_hut' or 'exact'") if self.angle < 0.0 or self.angle > 1.0: raise ValueError("'angle' must be between 0.0 - 1.0") if self.method == 'barnes_hut' and sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense ' 'data is required for method="barnes_hut". Use ' 'X.toarray() to convert to a dense numpy array if ' 'the array is small enough for it to fit in ' 'memory. Otherwise consider dimensionality ' 'reduction techniques (e.g. TruncatedSVD)') X = check_array(X, dtype=np.float32) else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is " "%f" % self.early_exaggeration) if self.n_iter < 200: raise ValueError("n_iter should be at least 200") if self.metric == "precomputed": if self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be used " "with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) if not np.all(distances >= 0): raise ValueError("All distances should be positive, either " "the metric or precomputed distances given " "as X are not correct") # Degrees of freedom of the Student's t-distribution. 
The suggestion # degrees_of_freedom = n_components - 1 comes from # "Learning a Parametric Embedding by Preserving Local Structure" # Laurens van der Maaten, 2009. degrees_of_freedom = max(self.n_components - 1.0, 1) n_samples = X.shape[0] # the number of nearest neighbors to find k = min(n_samples - 1, int(3. * self.perplexity + 1)) neighbors_nn = None if self.method == 'barnes_hut': if self.verbose: print("[t-SNE] Computing %i nearest neighbors..." % k) if self.metric == 'precomputed': # Use the precomputed distances to find # the k nearest neighbors and their distances neighbors_nn = np.argsort(distances, axis=1)[:, :k] else: # Find the nearest neighbors for every point bt = BallTree(X) # LvdM uses 3 * perplexity as the number of neighbors # And we add one to not count the data point itself # In the event that we have very small # of points # set the neighbors to n - 1 distances_nn, neighbors_nn = bt.query(X, k=k + 1) neighbors_nn = neighbors_nn[:, 1:] P = _joint_probabilities_nn(distances, neighbors_nn, self.perplexity, self.verbose) else: P = _joint_probabilities(distances, self.perplexity, self.verbose) assert np.all(np.isfinite(P)), "All probabilities should be finite" assert np.all(P >= 0), "All probabilities should be zero or positive" assert np.all(P <= 1), ("All probabilities should be less " "or then equal to one") if self.init == 'pca': pca = RandomizedPCA(n_components=self.n_components, random_state=random_state) X_embedded = pca.fit_transform(X) elif isinstance(self.init, np.ndarray): X_embedded = self.init elif self.init == 'random': X_embedded = None else: raise ValueError("Unsupported initialization scheme: %s" % self.init) return self._tsne(P, degrees_of_freedom, n_samples, random_state, X_embedded=X_embedded, neighbors=neighbors_nn, skip_num_points=skip_num_points) def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded=None, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with three stages: # * early exaggeration with momentum 0.5 # * early exaggeration with momentum 0.8 # * final optimization with momentum 0.8 # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. 
if X_embedded is None: # Initialize embedding randomly X_embedded = 1e-4 * random_state.randn(n_samples, self.n_components) params = X_embedded.ravel() opt_args = {} opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0, "learning_rate": self.learning_rate, "verbose": self.verbose, "n_iter_check": 25, "kwargs": dict(skip_num_points=skip_num_points)} if self.method == 'barnes_hut': m = "Must provide an array of neighbors to use Barnes-Hut" assert neighbors is not None, m obj_func = _kl_divergence_bh objective_error = _kl_divergence_error sP = squareform(P).astype(np.float32) neighbors = neighbors.astype(np.int64) args = [sP, neighbors, degrees_of_freedom, n_samples, self.n_components] opt_args['args'] = args opt_args['min_grad_norm'] = 1e-3 opt_args['n_iter_without_progress'] = 30 # Don't always calculate the cost since that calculation # can be nearly as expensive as the gradient opt_args['objective_error'] = objective_error opt_args['kwargs']['angle'] = self.angle opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence opt_args['args'] = [P, degrees_of_freedom, n_samples, self.n_components] opt_args['min_error_diff'] = 0.0 opt_args['min_grad_norm'] = 0.0 # Early exaggeration P *= self.early_exaggeration params, error, it = _gradient_descent(obj_func, params, **opt_args) opt_args['n_iter'] = 100 opt_args['momentum'] = 0.8 opt_args['it'] = it + 1 params, error, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] Error after %d iterations with early " "exaggeration: %f" % (it + 1, error)) # Save the final number of iterations self.n_iter_final = it # Final optimization P /= self.early_exaggeration opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 params, error, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, error)) X_embedded = params.reshape(n_samples, self.n_components) return X_embedded def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ embedding = self._fit(X) self.embedding_ = embedding return self.embedding_ def fit(self, X, y=None): """Fit X into an embedded space. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. """ self.fit_transform(X) return self def _check_fitted(self): if self.embedding_ is None: raise ValueError("Cannot call `transform` unless `fit` has" "already been called")
bsd-3-clause
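The TSNE docstring above recommends reducing very high-dimensional dense data with PCA (or TruncatedSVD for sparse data) to around 50 dimensions before embedding. A minimal sketch of that workflow using the public scikit-learn API; the array shapes and parameter values are illustrative only, not tuned:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# Toy high-dimensional data; in practice X is your own dense feature matrix.
rng = np.random.RandomState(0)
X = rng.randn(200, 512)

# Step 1: reduce to ~50 dimensions to suppress noise and speed up the
# pairwise distance computations t-SNE performs internally.
X_reduced = PCA(n_components=50).fit_transform(X)

# Step 2: embed into 2 dimensions with t-SNE.
tsne = TSNE(n_components=2, perplexity=30.0, init="pca", random_state=0)
X_embedded = tsne.fit_transform(X_reduced)
print(X_embedded.shape)  # (200, 2)

Perplexity is left at its default of 30 here; per the docstring, values between 5 and 50 are reasonable and the embedding is not very sensitive to the exact choice.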
yabata/pyrenn
python/examples/example_classification_mnist.py
1
1739
import matplotlib as mpl import matplotlib.pyplot as plt import pickle import numpy as np import pyrenn as prn ### #Read Example Data mnist = pickle.load( open( "MNIST_data.pkl", "rb" ) ) P = mnist['P'] Y = mnist['Y'] Ptest = mnist['Ptest'] Ytest = mnist['Ytest'] ### #Create and train NN #create recurrent neural network with 28*28 inputs, #1 hidden layers with 10 neurons #and 10 outputs (one for each possible class/number) #the NN uses no delayed or recurrent inputs/connections net = prn.CreateNN([28*28,10,10]) batch_size = 1000 number_of_batches=20 for i in range(number_of_batches): r = np.random.randint(0,25000-batch_size) Ptrain = P[:,r:r+batch_size] Ytrain = Y[:,r:r+batch_size] #Train NN with training data Ptrain=input and Ytrain=target #Set maximum number of iterations k_max #Set termination condition for Error E_stop #The Training will stop after k_max iterations or when the Error <=E_stop net = prn.train_LM(Ptrain,Ytrain,net, verbose=True,k_max=1,E_stop=1e-5) print('Batch No. ',i,' of ',number_of_batches) ### #Select Test data #Choose random number 0...5000-9 idx = np.random.randint(0,5000-9) #Select 9 random Test input data P_ = Ptest[:,idx:idx+9] #Calculate NN Output for the 9 random test inputs Y_ = prn.NNOut(P_,net) ### #PLOT fig = plt.figure(figsize=[11,7]) gs = mpl.gridspec.GridSpec(3,3) for i in range(9): ax = fig.add_subplot(gs[i]) y_ = np.argmax(Y_[:,i]) #find index with highest value in NN output p_ = P_[:,i].reshape(28,28) #Convert input data for plotting ax.imshow(p_) #plot input data ax.set_xticks([]) ax.set_yticks([]) ax.set_title(str(y_), fontsize=18) plt.show()
gpl-3.0
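The pyrenn script above trains on random mini-batches and plots nine sample predictions, but never reports an overall score. A short, hedged extension that estimates test accuracy by comparing the argmax of the network output with the argmax of the one-hot targets; it assumes net, Ptest and Ytest are still in scope from the script, and that prn.NNOut accepts the full test matrix in the same column-per-sample layout used for P_ above (evaluating all test samples at once may be slow):

import numpy as np
import pyrenn as prn

# Assumes `net`, `Ptest` and `Ytest` exist as defined in the script above.
Y_pred = prn.NNOut(Ptest, net)             # one output column per test sample
pred_classes = np.argmax(Y_pred, axis=0)   # predicted digit per sample
true_classes = np.argmax(Ytest, axis=0)    # target digit per sample
accuracy = np.mean(pred_classes == true_classes)
print('test accuracy: %.3f' % accuracy)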
RuthAngus/LSST-max
code/GProtation.py
1
6800
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt import george from george.kernels import ExpSine2Kernel, ExpSquaredKernel, WhiteKernel import glob import emcee try: import corner except: import triangle import h5py import subprocess from plotstuff import params, colours reb = params() cols = colours() import scipy.optimize as spo import time def lnprior(theta, plims): """ plims is a tuple, list or array containing the lower and upper limits for the rotation period. These are logarithmic! theta = A, l, Gamma, s, P """ if -20 < theta[0] < 20 and theta[4] < theta[1] and -20 < theta[2] < 20 \ and -20 < theta[3] < 20 and plims[0] < theta[4] < plims[1]: return 0. return -np.inf def Gprior(theta, plims): """ plims is an array containing the log(mean) and log(standard dev) of the Gaussian over period. theta = A, l, Gamma, s, P """ p = np.exp(theta[4]) mu, sig = np.exp(plims) if -20 < theta[0] < 20 and -20 < theta[2] < 20 and -20 < theta[3] < 20: return -.5*(p - mu)**2/sig**2 # Gaussian over p return -np.inf def Glnprior(theta, plims): """ plims is a tuple, list or array containing the lower and upper limits for the rotation period. These are not logarithmic! """ if -20 < theta[0] < 16 and -10 < theta[1] < 1 and -20 < theta[2] < 20 \ and -20 < theta[3] < 20 and np.log(plims[0]) < theta[4] < np.log(plims[1]): return -.5 * ((theta[4] - np.log(plims[2]))/(.5*np.log(plims[2])))**2 \ - .5 * ((theta[1] - .1*np.log(plims[2]))/(.1*plims[2]))**2 return -np.inf # lnprob def lnprob(theta, x, y, yerr, plims): return lnlike(theta, x, y, yerr) + lnprior(theta, plims) def Gprob(theta, x, y, yerr, plims): return lnlike(theta, x, y, yerr) + Gprior(theta, plims) # lnlike def lnlike(theta, x, y, yerr): theta = np.exp(theta) k = theta[0] * ExpSquaredKernel(theta[1]) \ * ExpSine2Kernel(theta[2], theta[4]) gp = george.GP(k, solver=george.HODLRSolver) try: gp.compute(x, np.sqrt(theta[3]+yerr**2)) except (ValueError, np.linalg.LinAlgError): return 10e25 return gp.lnlikelihood(y, quiet=True) # lnlike def neglnlike(theta, x, y, yerr): theta = np.exp(theta) k = theta[0] * ExpSquaredKernel(theta[1]) \ * ExpSine2Kernel(theta[2], theta[4]) gp = george.GP(k) try: gp.compute(x, np.sqrt(theta[3]+yerr**2)) except (ValueError, np.linalg.LinAlgError): return 10e25 return -gp.lnlikelihood(y, quiet=True) # make various plots def make_plot(sampler, x, y, yerr, ID, DIR, traces=False, tri=False, prediction=True): nwalkers, nsteps, ndims = np.shape(sampler) flat = np.reshape(sampler, (nwalkers * nsteps, ndims)) mcmc_result = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), zip(*np.percentile(flat, [16, 50, 84], axis=0))) mcmc_result = np.array([i[0] for i in mcmc_result]) print("\n", np.exp(np.array(mcmc_result[-1])), "period (days)", "\n") print(mcmc_result) np.savetxt("%s/%s_result.txt" % (DIR, ID), mcmc_result) fig_labels = ["A", "l", "G", "s", "P"] if traces: print("Plotting traces") for i in range(ndims): plt.clf() plt.plot(sampler[:, :, i].T, 'k-', alpha=0.3) plt.ylabel(fig_labels[i]) plt.savefig("%s/%s_%s.png" % (DIR, ID, fig_labels[i])) if tri: print("Making triangle plot") flat[:, -1] = np.exp(flat[:, -1]) try: fig = corner.corner(flat, labels=fig_labels) except: fig = triangle.corner(flat, labels=fig_labels) fig.savefig("%s/%s_triangle" % (DIR, ID)) print("%s/%s_triangle.png" % (DIR, ID)) if prediction: print("plotting prediction") theta = np.exp(np.array(mcmc_result)) k = theta[0] * ExpSquaredKernel(theta[1]) \ * ExpSine2Kernel(theta[2], theta[4]) gp = george.GP(k, solver=george.HODLRSolver) 
gp.compute(x, yerr) xs = np.linspace(x[0], x[-1], 1000) mu, cov = gp.predict(y, xs) plt.clf() plt.errorbar(x-x[0], y, yerr=yerr, **reb) plt.xlabel("$\mathrm{Time~(days)}$") plt.ylabel("$\mathrm{Normalised~Flux}$") plt.plot(xs, mu, color=cols.lightblue) plt.xlim(min(x), max(x)) plt.savefig("%s/%s_prediction" % (DIR, ID)) print("%s/%s_prediction.png" % (DIR, ID)) # take x, y, yerr and initial guess and do MCMC def MCMC(theta_init, x, y, yerr, plims, burnin, run, ID, DIR, nwalkers=32, logsamp=True, plot_inits=False): # figure out whether x, y and yerr are arrays or lists of lists quarters = False if len(x) < 20: quarters = True print("Quarter splits detected") print("\n", "log(theta_init) = ", theta_init) print("theta_init = ", np.exp(theta_init), "\n") if plot_inits: # plot initial guess and the result of minimize if quarters: xl = [i for j in x for i in j] yl = [i for j in y for i in j] yerrl = [i for j in yerr for i in j] print("plotting inits") print(np.exp(theta_init)) t = np.exp(theta_init) k = t[0] * ExpSquaredKernel(t[1]) * ExpSine2Kernel(t[2], t[3]) gp = george.GP(k) gp.compute(xl, yerrl) xs = np.linspace(xl[0], xl[-1], 1000) mu, cov = gp.predict(yl, xs) plt.clf() plt.errorbar(xl, yl, yerr=yerrl, **reb) plt.plot(xs, mu, color=cols.blue) args = (xl, yl, yerrl) results = spo.minimize(neglnlike, theta_init, args=args) print("optimisation results = ", results.x) r = np.exp(results.x) k = r[0] * ExpSquaredKernel(r[1]) * ExpSine2Kernel(r[2], r[3]) gp = george.GP(k) gp.compute(xl, yerrl) mu, cov = gp.predict(yl, xs) plt.plot(xs, mu, color=cols.pink, alpha=.5) plt.savefig("%s/%s_init" % (DIR, ID)) print("%s/%s_init.png" % (DIR, ID)) ndim, nwalkers = len(theta_init), nwalkers p0 = [theta_init+1e-4*np.random.rand(ndim) for i in range(nwalkers)] args = (x, y, yerr, plims) lp = lnprob if quarters: # if fitting each quarter separately, use a different lnprob lp = lnprob_split sampler = emcee.EnsembleSampler(nwalkers, ndim, lp, args=args) print("burning in...") p0, lp, state = sampler.run_mcmc(p0, burnin) sampler.reset() print("production run...") p0, lp, state = sampler.run_mcmc(p0, run) # save samples f = h5py.File("%s/%s_samples.h5" % (DIR, ID), "w") data = f.create_dataset("samples", np.shape(sampler.chain)) data[:, :] = np.array(sampler.chain) f.close() return sampler
mit
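GProtation.py works entirely in log space: theta = log([A, l, Gamma, s, P]) and plims holds the log period limits, with the prior additionally requiring log P < log l. A small sketch, assuming GProtation.py is importable from the working directory and that its own dependencies (george, emcee, plotstuff, h5py) are installed, showing how an initial guess is packed and sanity-checked against the prior and likelihood before launching MCMC; the synthetic light curve and parameter values are invented for illustration:

import numpy as np
import GProtation as gpr   # assumes the module and its dependencies are available

# Synthetic light curve: a noisy sinusoid with a 10-day period.
np.random.seed(42)
x = np.sort(np.random.uniform(0, 100, 200))
y = 1e-2 * np.sin(2 * np.pi * x / 10.0) + 1e-3 * np.random.randn(x.size)
yerr = 1e-3 * np.ones_like(y)

# theta = log([A, l, Gamma, s, P]); note l > P so the prior accepts the guess.
theta_init = np.log([1e-4, 30.0, 1.0, 1e-6, 9.0])
plims = np.log([1.0, 100.0])               # log limits on the rotation period

print(gpr.lnprior(theta_init, plims))      # 0.0 when the guess lies inside the prior
print(gpr.lnlike(theta_init, x, y, yerr))  # GP log-likelihood of the guess

The exact kernel constructor signature depends on the installed george version, so the likelihood call may need adjusting against george's API.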
akrherz/iem
htdocs/plotting/auto/scripts100/p133.py
1
5164
"""snowfall totals around day""" import datetime from pandas.io.sql import read_sql from pyiem.plot.use_agg import plt from pyiem.util import get_autoplot_context, get_dbconn from pyiem.exceptions import NoDataFound def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc["data"] = True desc["cache"] = 86400 desc[ "description" ] = """This plot displays the total reported snowfall for a period prior to a given date and then after the date for the winter season. When you select a date to split the winter season, the year shown in the date can be ignored. """ desc["arguments"] = [ dict( type="station", name="station", default="IATDSM", label="Select Station:", network="IACLIMATE", ), dict( type="date", name="date", default="2015/12/25", min="2015/01/01", label="Split Season by Date: (ignore the year)", ), ] return desc def get_data(fdict): """ Get the data""" pgconn = get_dbconn("coop") ctx = get_autoplot_context(fdict, get_description()) station = ctx["station"] date = ctx["date"] jul1 = datetime.date(date.year if date.month > 6 else date.year - 1, 7, 1) offset = int((date - jul1).days) table = "alldata_%s" % (station[:2],) df = read_sql( f""" with obs as ( select day, day - ((case when month > 6 then year else year - 1 end)||'-07-01')::date as doy, (case when month > 6 then year else year - 1 end) as eyear, snow from {table} where station = %s) SELECT eyear, sum(case when doy < %s then snow else 0 end) as before, sum(case when doy >= %s then snow else 0 end) as after, sum(snow) as total from obs GROUP by eyear ORDER by eyear ASC """, pgconn, params=(station, offset, offset), index_col="eyear", ) df = df[df["total"] > 0] return df def highcharts(fdict): """ Highcharts Output """ ctx = get_autoplot_context(fdict, get_description()) station = ctx["station"] date = ctx["date"] df = get_data(fdict) j = dict() j["title"] = { "text": "%s [%s] Snowfall Totals" % (ctx["_nt"].sts[station]["name"], station) } j["subtitle"] = { "text": "Before and After %s" % (date.strftime("%-d %B"),) } j["xAxis"] = { "title": { "text": "Snowfall Total [inch] before %s" % (date.strftime("%-d %B"),) }, "plotLines": [ { "color": "red", "value": df["before"].mean(), "width": 1, "label": {"text": "%.1fin" % (df["before"].mean(),)}, "zindex": 2, } ], } j["yAxis"] = { "title": { "text": "Snowfall Total [inch] on or after %s" % (date.strftime("%-d %B"),) }, "plotLines": [ { "color": "red", "value": df["after"].mean(), "width": 1, "label": {"text": "%.1fin" % (df["after"].mean(),)}, "zindex": 2, } ], } j["chart"] = {"zoomType": "xy", "type": "scatter"} rows = [] for yr, row in df.iterrows(): rows.append( dict( x=round(row["before"], 2), y=round(row["after"], 2), name="%s-%s" % (yr, yr + 1), ) ) j["series"] = [ { "data": rows, "tooltip": { "headerFormat": "", "pointFormat": ( "<b>Season:</b> {point.name}<br />" "Before: {point.x} inch<br />" "After: {point.y} inch" ), }, } ] return j def plotter(fdict): """ Go """ ctx = get_autoplot_context(fdict, get_description()) station = ctx["station"] date = ctx["date"] df = get_data(fdict) if df.empty: raise NoDataFound("Error, no results returned!") (fig, ax) = plt.subplots(1, 1) ax.scatter(df["before"].values, df["after"].values) ax.set_xlim(left=-0.1) ax.set_ylim(bottom=-0.1) ax.set_xlabel( "Snowfall Total [inch] Prior to %s" % (date.strftime("%-d %b"),) ) ax.set_ylabel( "Snowfall Total [inch] On and After %s" % (date.strftime("%-d %b"),) ) ax.grid(True) ax.set_title( ("%s [%s] Snowfall Totals\nPrior to and after: %s") % 
(ctx["_nt"].sts[station]["name"], station, date.strftime("%-d %B")) ) ax.axvline( df["before"].mean(), color="r", lw=2, label="Before Avg: %.1f" % (df["before"].mean(),), ) ax.axhline( df["after"].mean(), color="b", lw=2, label="After Avg: %.1f" % (df["after"].mean(),), ) ax.legend(ncol=2, fontsize=12) return fig, df if __name__ == "__main__": plotter(dict())
mit
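The SQL above folds each winter season onto a day-of-year offset measured from 1 July and then sums snowfall before and after the chosen split date. The same bookkeeping in plain Python on fabricated daily observations, to make the offset logic explicit; the dates and snowfall amounts here are invented for illustration:

import datetime

def season_split(observations, split_date):
    """observations: list of (date, snowfall) pairs; split by offset from 1 July."""
    jul1 = datetime.date(
        split_date.year if split_date.month > 6 else split_date.year - 1, 7, 1)
    offset = (split_date - jul1).days
    before = after = 0.0
    for day, snow in observations:
        season_start = datetime.date(
            day.year if day.month > 6 else day.year - 1, 7, 1)
        doy = (day - season_start).days
        if doy < offset:
            before += snow
        else:
            after += snow
    return before, after

obs = [(datetime.date(2015, 11, 20), 1.2),
       (datetime.date(2015, 12, 24), 3.0),
       (datetime.date(2016, 1, 5), 2.5),
       (datetime.date(2016, 2, 14), 0.8)]
print(season_split(obs, datetime.date(2015, 12, 25)))   # before ~4.2 in, after ~3.3 in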
abhisg/scikit-learn
examples/tree/plot_tree_regression.py
206
1476
""" =================================================================== Decision Tree Regression =================================================================== A 1D regression with decision tree. The :ref:`decision trees <tree>` is used to fit a sine curve with addition noisy observation. As a result, it learns local linear regressions approximating the sine curve. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) # Import the necessary modules and libraries import numpy as np from sklearn.tree import DecisionTreeRegressor import matplotlib.pyplot as plt # Create a random dataset rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_1.fit(X, y) regr_2.fit(X, y) # Predict X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) # Plot the results plt.figure() plt.scatter(X, y, c="k", label="data") plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2) plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
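The tree-regression example shows overfitting visually; a quick numeric check of the same effect, holding out part of the data and comparing train and test mean squared error at several depths (the split and the depths are illustrative):

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(200, 1), axis=0)
y = np.sin(X).ravel() + 0.2 * rng.randn(200)

# Simple holdout: every third sample goes to the test set.
test_mask = np.zeros(len(X), dtype=bool)
test_mask[::3] = True
X_train, y_train = X[~test_mask], y[~test_mask]
X_test, y_test = X[test_mask], y[test_mask]

for depth in (2, 5, 20):
    tree = DecisionTreeRegressor(max_depth=depth).fit(X_train, y_train)
    print("max_depth=%2d  train MSE=%.3f  test MSE=%.3f" % (
        depth,
        mean_squared_error(y_train, tree.predict(X_train)),
        mean_squared_error(y_test, tree.predict(X_test))))

Train error keeps falling as the depth grows, while test error typically bottoms out and then worsens once the tree starts fitting the noise.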
idbedead/RNA-sequence-tools
FPKM_Parsing/scLVM_run.py
2
6041
import sys import scipy as SP import pylab as PL import numpy as np from matplotlib import cm import h5py import os import GPy import pandas as pd import cPickle as pickle #adjust path scLVM_BASE = '/Volumes/Seq_data/count-picard_combined_ips17_BU3' from scLVM import scLVM #sys.path.append(scLVM_BASE) #sys.path.append( scLVM_BASE +'..') #sys.path.append(scLVM_BASE + 'scLVM/utils') #this is not included in the github repo #sys.path.append(scLVM_BASE +'CFG') #from misc import * #from barplot import * #from default import * from scLVM.utils.barplot import * from scLVM.utils.misc import * from IPython.display import Latex data = os.path.join(scLVM_BASE,'data_ips__normCounts.h5f') f = h5py.File(data,'r') # gene expression matrix Y = f['LogNcountsGene'][:] # technical noise tech_noise = f['LogVar_techGene'][:] # index of heterogeneous genes genes_het_bool=f['genes_heterogen'][:] # gene names geneID = f['gene_names'][:] cell_names = f['cell_names'][:] # idx of cell cycle genes from GO cellcyclegenes_filter = SP.unique(f['cellcyclegenes_filter'][:].ravel() -1) # idx of cell cycle genes from cycle base cellcyclegenes_filterCB = f['ccCBall_gene_indices'][:].ravel() -1 # filter cell cycle genes idx_cell_cycle = SP.union1d(cellcyclegenes_filter,cellcyclegenes_filterCB) # determine non-zero counts idx_nonzero = SP.nonzero((Y.mean(0)**2)>0)[0] idx_cell_cycle_noise_filtered = SP.intersect1d(idx_cell_cycle,idx_nonzero) # subset gene expression matrix Ycc = Y[:,idx_cell_cycle_noise_filtered] plt = PL.subplot(1,1,1) PL.imshow(Ycc,cmap=cm.RdBu,vmin=-3,vmax=+3,interpolation='None') #PL.colorbar() plt.set_xticks([]) plt.set_yticks([]) PL.xlabel('genes') PL.ylabel('cells') PL.ylabel('Variance explained') PL.show() k = 80 # number of latent factors out_dir = scLVM_BASE + 'cache' # folder where results are cached file_name = 'Kcc.hdf5' # name of the cache file recalc = True # recalculate X and Kconf use_ard = True # use automatic relevance detection sclvm = scLVM(Y) #Fit model with 80 factors X_ARD,Kcc_ARD,varGPLVM_ARD = sclvm.fitGPLVM(idx=idx_cell_cycle_noise_filtered,k=k,out_dir=out_dir,file_name=file_name,recalc=recalc, use_ard=use_ard) #Plot variance contributions from ARD plt = PL.subplot(1,1,1) PL.title('Variance explained by latent factors') PL.scatter(SP.arange(k)+1,varGPLVM_ARD['X_ARD']) PL.xlim([0,k+1]) PL.xlabel('# Factor') PL.ylabel('Variance explained') PL.show() #Fit model with a single factor (rank 1 covariance matrix) X,Kcc,varGPLVM = sclvm.fitGPLVM(idx=idx_cell_cycle_noise_filtered,k=1,out_dir='./cache',file_name=file_name,recalc=True, use_ard=False) #Plot inferred similarity matrix plt = PL.subplot(1,1,1) PL.title('Similarity matrix based on cell cycle') PL.imshow(Kcc,cmap=cm.RdBu,vmin=-3,vmax=+3,interpolation='None') PL.colorbar() plt.set_xticks([]) plt.set_yticks([]) PL.xlabel('cells') PL.ylabel('cells') PL.ylabel('Variance explained') PL.show() # considers only heterogeneous genes Ihet = genes_het_bool==1 Y = Y[:,Ihet] tech_noise = tech_noise[Ihet] geneID = geneID[Ihet] print geneID print len(geneID) #optionally: restrict range for the analysis i0 = 0 # gene from which the analysis starts i1 = 6025 # gene at which the analysis ends # construct sclvm object sclvm = scLVM(Y,geneID=geneID,tech_noise=tech_noise) # fit the model from i0 to i1 sclvm.varianceDecomposition(K=Kcc,i0=i0,i1=i1) normalize=True # variance components are normalizaed to sum up to one # get variance components var, var_info = sclvm.getVarianceComponents(normalize=normalize) var_filtered = var[var_info['conv']] # filter out 
genes for which vd has not converged # get corrected expression levels Ycorr = sclvm.getCorrectedExpression() with open(os.path.join(scLVM_BASE,'scLVM_Ycorr.p'), 'wb') as fp1: pickle.dump(Ycorr, fp1) print Ycorr.shape Ycorr_df = pd.DataFrame(Ycorr, index=cell_names, columns=geneID) Ycorr_df.to_csv(os.path.join(scLVM_BASE,'scLVM_Ycorr.txt'), sep = '\t') #calculate average variance components across all genes and visualize var_mean = var_filtered.mean(0) colors = ['Green','MediumBlue','Gray'] pp=PL.pie(var_mean,labels=var_info['col_header'],autopct='%1.1f%%',colors=colors,shadow=True, startangle=0) PL.show() H2=1-var_filtered[:,2] var_comp_fileds = SP.array([[0, 'cell cycle', 'Peru'], [1, 'biol. var', 'DarkMagenta'], [2, 'tech. var', '#92c5de']], dtype=object) var_plot(var_filtered,H2,var_comp_fileds,normalize=True, figsize=[5,4]) PL.show() i0 = 0 # gene from which the analysis starts i1 = 20 # gene to which the analysis ends # fit lmm without correction pv0,beta0,info0 = sclvm.fitLMM(K=None,i0=i0,i1=i1,verbose=False) # fit lmm with correction pv1,beta1,info1 = sclvm.fitLMM(K=Kcc,i0=i0,i1=i1,verbose=False) plt=PL.subplot(2,2,1) PL.title('Without Correction') p=PL.imshow(beta0[:,i0:i1],cmap=cm.RdBu,vmin=-0.6,vmax=+1,interpolation='None') PL.colorbar() plt.set_xticks([]) plt.set_yticks([]) PL.xlabel('gene'),PL.ylabel('gene') plt=PL.subplot(2,2,2) PL.title('With Correction') p=PL.imshow(beta1[:,i0:i1],cmap=cm.RdBu,vmin=-0.6,vmax=+1,interpolation='None') PL.colorbar() plt.set_xticks([]) plt.set_yticks([]) PL.xlabel('gene'),PL.ylabel('gene') PL.show() np.savetxt(os.path.join(scLVM_BASE,'Ycorr.txt'),Ycorr) # Model optimization Ystd = Ycorr-Ycorr.mean(0) Ystd/=Ystd.std(0) input_dim = 2 # How many latent dimensions to use kern = GPy.kern.RBF(input_dim,ARD=True) # ARD kernel m = GPy.models.BayesianGPLVM(Ystd, input_dim=input_dim, kernel=kern, num_inducing=40) m.optimize('scg', messages=0, max_iters=2000) m.kern.plot_ARD() PL.show() gene_to_plot = 'NKX2-1' i_nkx = SP.where(geneID==gene_to_plot) color = Ycorr[:,i_nkx] #color = Ycorr[:,0] PL.scatter(m.X[:,0]['mean'], m.X[:,1]['mean'], 40, color) PL.xlabel('PC1') PL.ylabel('PC2') PL.title(gene_to_plot) PL.colorbar() PL.show() [S,W] = PCA(Ystd,2) PL.scatter(S[:,0],S[:,1], 40, color) PL.xlabel('PC1') PL.ylabel('PC2') PL.colorbar() PL.title(gene_to_plot) PL.show()
mit
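The scLVM script above saves the cell-cycle-corrected expression matrix both as a pickle ('scLVM_Ycorr.p') and as a tab-separated table ('scLVM_Ycorr.txt'). A small sketch of reloading the text output for downstream analysis, assuming the script has already been run and the path below (taken from the script) still applies:

import os
import pandas as pd

scLVM_BASE = '/Volumes/Seq_data/count-picard_combined_ips17_BU3'   # path used above

# Corrected expression (cells x genes), written with to_csv in the script.
ycorr = pd.read_csv(os.path.join(scLVM_BASE, 'scLVM_Ycorr.txt'),
                    sep='\t', index_col=0)
print(ycorr.shape)

# Genes with the largest residual variance after the cell-cycle factor is removed.
print(ycorr.var(axis=0).sort_values(ascending=False).head(10))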
jmoles/trail-viewer
GATools/chart.py
1
17428
import datetime import os import StringIO import base64 import matplotlib.pyplot as pyplot import matplotlib.backends.backend_agg as pltagg import numpy as np import sys import plotly.plotly as py from plotly.graph_objs import Scatter, Data, Layout, XAxis, YAxis, ZAxis from plotly.graph_objs import Figure, Line, Bar, Scatter3d, Scene, Surface from plotly.graph_objs import Heatmap, ErrorY import time from DBUtils import DBUtils class chart: def __init__(self): self.__pgdb = DBUtils() # Caches used for some functions. self.__gen_data_cache = {} @staticmethod def __generate_plotly_url(fig, **kwargs): """ Returns a ready-to-embed URL to a provided fig. """ # Sign in, if necessary. if py.get_credentials()["username"] == "": py.sign_in("jmoles", os.environ.get('PLOTLY_API_KEY')) return py.plot( fig, auto_open=False, **kwargs) @staticmethod def plotly_single_run_set(run_id, run_info=None): # Establish a database connection pgdb = DBUtils() # Fetch this run's information, if not provided. if run_info is None: run_info = pgdb.fetchRunInfo(run_id)[run_id] # Determine the maximum amount of food and moves possible. trail_data = pgdb.getTrailData(run_info["trails_id"])[0] max_food = np.bincount(np.squeeze(np.asarray(trail_data.flatten())))[1] max_moves = np.array(run_info["moves_limit"]) # Fetch the data on the run and determine number of generations. gens_data = pgdb.fetchRunGenerations([run_id])[run_id] num_gens = len(gens_data) x = np.linspace(0, num_gens - 1, num=num_gens) # Settings used for plotting. chart_set_config = { "food" : { "db_key" : "food", "stats" : ["max", "min", "avg", "std"], "title" : "Food vs. Generations for Run ID {0}", "type" : Scatter, "plot-mode" : "lines", "xaxis" : "Generations", "yaxis" : "Food Consumed", "max-line" : max_food, "max-title" : "Available" }, "moves-taken" : { "db_key" : "moves", "stats" : ["max", "min", "avg", "std"], "title" : "Moves Taken vs. Generations for Run ID {0}", "type" : Scatter, "plot-mode" : "lines", "xaxis" : "Generations", "yaxis" : "Moves Taken", "max-line" : max_moves, "max-title" : "Limit" }, "moves-dir" : { "db_key" : "moves", "stats" : ["left", "right", "forward", "none"], "title" : "Move Types vs. Generations for Run ID {0}", "type" : Scatter, "plot-mode" : "lines", "xaxis" : "Generations", "yaxis" : "Move Type", "max-line" : None, } } plot_urls = {} # TODO: Could multithread here to speed things up. for chart_type, settings in chart_set_config.items(): traces_list = [] # Go through each of the stats and build the traces. for stat in settings["stats"]: data_set = np.zeros((num_gens)) for curr_gen in range(0, num_gens): data_set[curr_gen] = ( gens_data[curr_gen] [settings["db_key"]][stat]) this_trace = settings["type"]( x=x, y=data_set, mode=settings["plot-mode"], name=stat.title() ) traces_list.append(this_trace) # If desired, add the maximum line. if settings["max-line"] is not None: y_val = np.empty(len(x)) y_val.fill(settings["max-line"]) traces_list.append( settings["type"]( x=x, y=y_val, mode="lines", line={ "dash" : "dash" }, name=settings["max-title"].title() ) ) layout = Layout( title=settings["title"].format(run_id), xaxis=XAxis( title=settings["xaxis"].format(run_id) ), yaxis=YAxis( title=settings["yaxis"].format(run_id) ), ) fig = Figure(data=Data(traces_list), layout=layout) # Generate the URL. 
plot_urls[chart_type] = chart.__generate_plotly_url(fig, filename="apigen/{0}_{1}".format(chart_type, run_id), fileopt='overwrite',) return plot_urls @staticmethod def sweep_charts(db_data, config_id, config_info, sweep_type, x_label, y_label=None): """ Given a set of db_data from DBUtils.fetch_run_config_sweep_by_network along with the config_id, and maximum amount of food, generates a food and moves taken sweep plot. Returns ready to embed URLs. """ plot_urls = {} is_3d = False # Determines if plot is 3d # Determine how to label the axes. if sweep_type == "selection": # Grab the x-axis labels for this plot. x_label_vals = [y[3] for y in [ db_data[x][0] for x in db_data]] else: x_label_vals = sorted(db_data.keys()) chart_set_config = { "food" : { "title" : "Food vs. {0} Sweep".format(x_label), "db-idx" : 0, "val-func" : [max, np.average], "plot-mode" : "lines", "xaxis" : x_label.title(), "yaxis" : "Food Consumed", "max-line" : config_info["max_food"], "max-title" : "Available", "label" : ["max", "mean", "std"] }, "moves-taken" : { "title" : "Moves Taken vs. {0} Sweep".format(x_label), "db-idx" : 1, "val-func" : [min, np.average], "plot-mode" : "lines", "xaxis" : x_label.title(), "yaxis" : "Moves Taken", "label" : ["min", "mean", "std"] }, "num-runs" : { "title" : "Number of runs", "db-idx" : 1, "val-func" : [len], "plot-mode" : "lines", "xaxis" : x_label.title(), "yaxis" : "Moves Taken", "label" : ["min", "mean", "std"] }, } # Add the max line for moves if not "moves_limit" type. if sweep_type != "moves_limit": chart_set_config["moves-taken"]["max-line"] = ( config_info["moves_limit"]) chart_set_config["moves-taken"]["max-title"] = "Limit" if (sweep_type == "p_mutate_crossover" or sweep_type == "dl_length_hidden"): for curr_key in chart_set_config.keys(): chart_set_config[curr_key]["xaxis"] = x_label chart_set_config[curr_key]["yaxis"] = y_label chart_set_config[curr_key]["type"] = Heatmap if curr_key == "food": chart_set_config[curr_key]["zaxis"] = "Food Consumed" chart_set_config[curr_key]["title"] = "Food 3D Sweep" chart_set_config[curr_key]["val-func"] = [max] elif curr_key == "moves-taken": chart_set_config[curr_key]["zaxis"] = "Food Consumed" chart_set_config[curr_key]["title"] = "Moves Taken 3D Sweep" chart_set_config[curr_key]["val-func"] = [min] elif curr_key == "num-runs": chart_set_config[curr_key]["zaxis"] = "Number of Runs" if sweep_type == "p_mutate_crossover": step_size = 0.1 else: step_size = 1.0 chart_set_config[curr_key]["step-size"] = step_size is_3d = True # TODO: Could multithread here to speed things up. for chart_type, settings in chart_set_config.items(): traces_list = [] for idx, this_func in enumerate(settings["val-func"]): x_vals = [] y_vals = [] z_vals = [] y_std_dev = [] if is_3d: y_vals = sorted(db_data.keys()) # Need to find the length of x and min/max x to # figure out the labels and empty spots on heat chart. len_y = len(y_vals) x_vals = [] for cy in y_vals: curr_x = sorted(db_data[cy].keys()) x_vals.extend(curr_x) x_vals = list(set(x_vals)) x_vals.sort() y_vals = list(np.around( np.arange( start=min(y_vals), stop=max(y_vals) + settings["step-size"], step=settings["step-size"]), decimals=4)) x_vals = list(np.around( np.arange( start=min(x_vals), stop=max(x_vals) + settings["step-size"], step=settings["step-size"]), decimals=4)) # Go through all of the y/x values and fill in z. 
for cy in y_vals: this_z = dict.fromkeys(x_vals) if cy in db_data: for cx in sorted(db_data[cy].keys()): this_z[cx] = this_func( [x[settings["db-idx"]] for x in db_data[cy][cx]]) this_z = [myz[1] for myz in sorted(this_z.items())] z_vals.append(this_z) this_trace = settings["type"]( x=x_vals, y=y_vals, z=z_vals, name=settings["label"][idx].title() ) else: for curr_x in sorted(db_data.keys()): y_vals.append(this_func( [x[settings["db-idx"]] for x in db_data[curr_x]])) if this_func == np.average: y_std_dev.append(np.std( [x[settings["db-idx"]] for x in db_data[curr_x]])) if this_func == np.average: this_trace = Scatter( x=x_label_vals, y=y_vals, mode=settings["plot-mode"], name=settings["label"][idx].title(), error_y=ErrorY( type='data', array=y_std_dev, visible=True, ) ) else: this_trace = Scatter( x=x_label_vals, y=y_vals, mode=settings["plot-mode"], name=settings["label"][idx].title() ) traces_list.append(this_trace) # If desired, add the maximum line. if "max-line" in settings and not is_3d: y_val = np.empty(len(x_label_vals)) y_val.fill(settings["max-line"]) traces_list.append( Scatter( x=x_label_vals, y=y_val, mode="lines", line={ "dash" : "dash" }, name=settings["max-title"].title() ) ) layout = Layout( title=settings["title"], xaxis=XAxis(title=settings["xaxis"]), yaxis=YAxis(title=settings["yaxis"]), ) fig = Figure(data=Data(traces_list), layout=layout) # Generate the URL. plot_urls[chart_type] = chart.__generate_plotly_url(fig, filename="apigen/sweep_{0}_{1}_{2}".format( ''.join(e for e in x_label if e.isalnum()), config_id, chart_type), fileopt="overwrite") return plot_urls def line_by_config_id(self, config_id, ext="png", stat_group="food", stat=None, show_title=True): if stat_group == "moves_stats" and stat == None: stat=["left", "right", "forward", "none"] elif stat == None: stat=["min", "max", "avg"] # Get the list of run_ids with this configuration. run_ids_l = self.__pgdb.getRunsWithConfigID(config_id) # Generate the figure and axes common to all of these. fig = pyplot.Figure() axis = fig.add_subplot(1,1,1) # Get information on the run run_info = self.__pgdb.fetchConfigInfo(config_id) max_food = run_info["max_food"] # Find the network name, trail name, and number of generations. net_name = run_info["network_name"] trail_name = run_info["trail_name"] num_gens = run_info["generations"] max_moves = np.array(run_info["moves_limit"]) # Take each run and now fetch data for each. ids_search_l = [] for curr_id in run_ids_l: if not self.__gen_data_cache.has_key(curr_id): ids_search_l.append(curr_id) if len(ids_search_l) > 0: self.__gen_data_cache = dict( self.__gen_data_cache.items() + self.__pgdb.fetchRunGenerations(ids_search_l).items()) gens_data = self.__gen_data_cache x = np.linspace(0, num_gens - 1, num=num_gens) for curr_stat in stat: data_set = np.zeros((num_gens)) for curr_gen in range(0, num_gens): if stat_group == "moves_stats": curr_stat_group = "moves" else: curr_stat_group = stat_group this_gen = [] for curr_run in run_ids_l: if curr_gen in gens_data[curr_run]: this_gen.append(gens_data[curr_run][curr_gen] [curr_stat_group][curr_stat]) else: this_gen.append(None) data_set[curr_gen] = np.mean( filter(lambda a: a is not None, this_gen)) axis.plot(x, data_set, '-', label=curr_stat.title()) if show_title: plot_title = ( "Mean - {0} - {1} g{2}/p{3}".format( net_name, trail_name, num_gens, run_info["population"])) axis.set_title(plot_title) # Determine the maximum type to show. 
if stat_group == "food": axis.plot(x, np.repeat(np.array(max_food), num_gens), 'r--') axis.axis((0, num_gens, 0, max_food + 5)) axis.set_ylabel("Food Consumed") axis.set_xlabel("Generations") axis.legend(loc="best") elif stat_group == "moves": axis.plot(x, np.repeat( np.array(max_moves), num_gens), 'r--') axis.axis((0, num_gens, 0, max_moves + 5)) axis.set_ylabel("Moves Taken") axis.set_xlabel("Generations") axis.legend(loc="lower left") elif stat_group == "moves_stats": axis.axis((0, num_gens, 0, max_moves + 5)) axis.set_ylabel("Moves Taken") axis.set_xlabel("Generations") axis.legend(loc="upper left", ncol=2) fig.set_facecolor('w') return (self.__createImage(fig, ext), len(run_ids_l)) def __createImage(self, fig, ext="jpg"): """ Takes a matplotlib fig and generates given ext type. Returns """ canvas = pltagg.FigureCanvasAgg(fig) output = StringIO.StringIO() if ext == "tif" or ext == "tiff": canvas.print_tif(output) elif ext == "bmp": canvas.print_bmp(output) elif ext == "eps": canvas.print_eps(output) elif ext == "png": canvas.print_png(output) elif ext == "pdf": canvas.print_pdf(output) elif ext == "svg": canvas.print_svg(output) else: canvas.print_jpg(output) return output
mit
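chart.py renders matplotlib figures to raw image bytes through FigureCanvasAgg rather than pyplot.savefig, so the result can be returned directly from a web handler. A standalone sketch of that pattern; it is written for Python 3 with io.BytesIO, whereas the class above targets Python 2 and StringIO:

import io
import matplotlib
matplotlib.use("Agg")                       # headless backend, no display required
from matplotlib.figure import Figure
import matplotlib.backends.backend_agg as pltagg

fig = Figure(figsize=(4, 3))
axis = fig.add_subplot(1, 1, 1)
axis.plot([0, 1, 2, 3], [10, 12, 9, 14], '-')
axis.set_xlabel("Generations")
axis.set_ylabel("Food")

canvas = pltagg.FigureCanvasAgg(fig)
output = io.BytesIO()
canvas.print_png(output)                    # same call __createImage uses for PNG
png_bytes = output.getvalue()
print(len(png_bytes), "bytes of PNG data")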
Srisai85/scikit-learn
examples/cluster/plot_segmentation_toy.py
258
3336
""" =========================================== Spectral clustering for image segmentation =========================================== In this example, an image with connected circles is generated and spectral clustering is used to separate the circles. In these settings, the :ref:`spectral_clustering` approach solves the problem know as 'normalized graph cuts': the image is seen as a graph of connected voxels, and the spectral clustering algorithm amounts to choosing graph cuts defining regions while minimizing the ratio of the gradient along the cut, and the volume of the region. As the algorithm tries to balance the volume (ie balance the region sizes), if we take circles with different sizes, the segmentation fails. In addition, as there is no useful information in the intensity of the image, or its gradient, we choose to perform the spectral clustering on a graph that is only weakly informed by the gradient. This is close to performing a Voronoi partition of the graph. In addition, we use the mask of the objects to restrict the graph to the outline of the objects. In this example, we are interested in separating the objects one from the other, and not from the background. """ print(__doc__) # Authors: Emmanuelle Gouillart <[email protected]> # Gael Varoquaux <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.feature_extraction import image from sklearn.cluster import spectral_clustering ############################################################################### l = 100 x, y = np.indices((l, l)) center1 = (28, 24) center2 = (40, 50) center3 = (67, 58) center4 = (24, 70) radius1, radius2, radius3, radius4 = 16, 14, 15, 14 circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2 circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2 circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2 circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2 ############################################################################### # 4 circles img = circle1 + circle2 + circle3 + circle4 mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) # Convert the image into a graph with the value of the gradient on the # edges. graph = image.img_to_graph(img, mask=mask) # Take a decreasing function of the gradient: we take it weakly # dependent from the gradient the segmentation is close to a voronoi graph.data = np.exp(-graph.data / graph.data.std()) # Force the solver to be arpack, since amg is numerically # unstable on this example labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack') label_im = -np.ones(mask.shape) label_im[mask] = labels plt.matshow(img) plt.matshow(label_im) ############################################################################### # 2 circles img = circle1 + circle2 mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) graph = image.img_to_graph(img, mask=mask) graph.data = np.exp(-graph.data / graph.data.std()) labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack') label_im = -np.ones(mask.shape) label_im[mask] = labels plt.matshow(img) plt.matshow(label_im) plt.show()
bsd-3-clause
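The step that makes this segmentation work is converting the image gradient into edge affinities with a decreasing exponential before spectral_clustering is called. A tiny sketch of that transformation on a 3x3 toy image, showing what img_to_graph produces and how the rescaling maps strong gradients to weak affinities (beta is an illustrative scaling constant):

import numpy as np
from sklearn.feature_extraction import image

img = np.array([[0.0, 0.0, 1.0],
                [0.0, 0.0, 1.0],
                [0.0, 0.0, 1.0]])

# Sparse pixel-adjacency matrix; weights are derived from the image gradient.
graph = image.img_to_graph(img)
print(graph.shape)       # (9, 9): one node per pixel
print(graph.data[:10])   # raw weights before rescaling

# Decreasing function of the gradient: large gradients become small affinities.
beta = 1.0
affinities = np.exp(-beta * graph.data / graph.data.std())
print(affinities[:10])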
Djabbz/scikit-learn
examples/model_selection/plot_train_error_vs_test_error.py
349
2577
""" ========================= Train error vs Test error ========================= Illustration of how the performance of an estimator on unseen data (test data) is not the same as the performance on training data. As the regularization increases the performance on train decreases while the performance on test is optimal within a range of values of the regularization parameter. The example with an Elastic-Net regression model and the performance is measured using the explained variance a.k.a. R^2. """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # License: BSD 3 clause import numpy as np from sklearn import linear_model ############################################################################### # Generate sample data n_samples_train, n_samples_test, n_features = 75, 150, 500 np.random.seed(0) coef = np.random.randn(n_features) coef[50:] = 0.0 # only the top 10 features are impacting the model X = np.random.randn(n_samples_train + n_samples_test, n_features) y = np.dot(X, coef) # Split train and test data X_train, X_test = X[:n_samples_train], X[n_samples_train:] y_train, y_test = y[:n_samples_train], y[n_samples_train:] ############################################################################### # Compute train and test errors alphas = np.logspace(-5, 1, 60) enet = linear_model.ElasticNet(l1_ratio=0.7) train_errors = list() test_errors = list() for alpha in alphas: enet.set_params(alpha=alpha) enet.fit(X_train, y_train) train_errors.append(enet.score(X_train, y_train)) test_errors.append(enet.score(X_test, y_test)) i_alpha_optim = np.argmax(test_errors) alpha_optim = alphas[i_alpha_optim] print("Optimal regularization parameter : %s" % alpha_optim) # Estimate the coef_ on full data with optimal regularization parameter enet.set_params(alpha=alpha_optim) coef_ = enet.fit(X, y).coef_ ############################################################################### # Plot results functions import matplotlib.pyplot as plt plt.subplot(2, 1, 1) plt.semilogx(alphas, train_errors, label='Train') plt.semilogx(alphas, test_errors, label='Test') plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k', linewidth=3, label='Optimum on test') plt.legend(loc='lower left') plt.ylim([0, 1.2]) plt.xlabel('Regularization parameter') plt.ylabel('Performance') # Show estimated coef_ vs true coef plt.subplot(2, 1, 2) plt.plot(coef, label='True coef') plt.plot(coef_, label='Estimated coef') plt.legend() plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26) plt.show()
bsd-3-clause
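The example scans alphas by hand and, deliberately for illustration, picks the optimum using the test score. For comparison, scikit-learn also provides ElasticNetCV, which performs the same kind of alpha search with internal cross-validation on the training set only, leaving the test set for the final evaluation. A minimal sketch reusing the same data-generation recipe:

import numpy as np
from sklearn.linear_model import ElasticNetCV

n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
X_train, y_train = X[:n_samples_train], y[:n_samples_train]
X_test, y_test = X[n_samples_train:], y[n_samples_train:]

# Cross-validated alpha search over the same grid, fit on training data only.
enet_cv = ElasticNetCV(l1_ratio=0.7, alphas=np.logspace(-5, 1, 60), cv=5)
enet_cv.fit(X_train, y_train)
print("alpha chosen by CV: %s" % enet_cv.alpha_)
print("R^2 on the held-out test set: %.3f" % enet_cv.score(X_test, y_test))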
pypot/scikit-learn
examples/decomposition/plot_incremental_pca.py
244
1878
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ print(__doc__) # Authors: Kyle Kastner # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names): plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1], c=c, label=target_name) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error " "%.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best") plt.axis([-4, 4, -1.5, 1.5]) plt.show()
bsd-3-clause
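The incremental PCA example calls fit_transform on the full iris matrix, so the out-of-core capability is not actually exercised there. A brief sketch of the streaming usage IPCA is designed for, feeding chunks through partial_fit; the batch boundaries are illustrative, and each batch must contain at least n_components samples:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import IncrementalPCA

X = load_iris().data
ipca = IncrementalPCA(n_components=2)

# Feed the data in chunks, as if it were streaming from disk.
for batch in np.array_split(X, 15):
    ipca.partial_fit(batch)

# Transform can be applied batch by batch as well.
X_ipca = np.vstack([ipca.transform(batch) for batch in np.array_split(X, 15)])
print(X_ipca.shape)                  # (150, 2)
print(ipca.explained_variance_ratio_)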
martinpilat/dag-evaluate
aggregate_logs.py
1
1209
__author__ = 'Martin' import sys import json import matplotlib.pyplot as plt import numpy as np def aggregate(logs, field): agg = map(lambda log: [x[field] for x in log], logs) agg = np.array(list(zip(*agg))) print(agg) return np.array([np.min(agg, axis=1), np.mean(agg, axis=1), np.max(agg, axis=1)]).T if __name__ == '__main__': log_files = sys.argv[1:] outname = log_files[0].split('.')[0] logs = [] for lf in log_files: log = json.load(open(lf)) logs.append(log) for field in ['max_score', 'min_score', 'avg_score', 'max_time', 'min_time', 'avg_time', 'max_std', 'min_std', 'avg_std', 'min_size', 'max_size', 'avg_size', 'invalid_dags']: aggregated = aggregate(logs, field) out = outname + '-agg_' + field np.savetxt(out + '.csv', aggregate(logs, field), delimiter=',') plt.figure(1, figsize=(6, 4)) plt.xlabel('Generation number') plt.ylabel(field) plt.errorbar(np.arange(len(aggregated.T[0])), aggregated.T[1], yerr=[aggregated.T[1]-aggregated.T[0], aggregated.T[2]-aggregated.T[1]]) plt.tight_layout() plt.savefig(out+'.png') plt.delaxes()
mit
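aggregate() expects a list of run logs, each being a list of per-generation dictionaries, and returns a (generations x 3) array whose columns are the min, mean and max of the chosen field across runs. A tiny worked example with two fabricated two-generation logs; the function is repeated here, minus its debug print, so the sketch runs standalone:

import numpy as np

def aggregate(logs, field):
    # same logic as aggregate_logs.aggregate, without the debug print
    agg = map(lambda log: [x[field] for x in log], logs)
    agg = np.array(list(zip(*agg)))
    return np.array([np.min(agg, axis=1), np.mean(agg, axis=1),
                     np.max(agg, axis=1)]).T

logs = [
    [{'max_score': 0.50}, {'max_score': 0.70}],   # run 1, generations 0 and 1
    [{'max_score': 0.60}, {'max_score': 0.90}],   # run 2, generations 0 and 1
]
print(aggregate(logs, 'max_score'))
# one row per generation, columns [min, mean, max]:
# [[0.5  0.55 0.6 ]
#  [0.7  0.8  0.9 ]]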
JarbasAI/JarbasAI
jarbas_utils/deep_throat.py
1
197211
#!/usr/bin/python # -*- coding: utf-8 -*- """ ################################################################################ # # # deep throat # # # ################################################################################ # # # LICENCE INFORMATION # # # # This is a speech program. # # # # copyright (C) 2016 William Breaden Madden, name by Liam Moore # # # # This software is released under the terms of the GNU General Public License # # version 3 (GPLv3). # # # # This program is free software: you can redistribute it and/or modify it # # under the terms of the GNU General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # This program is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # # more details. # # # # For a copy of the GNU General Public License, see # # <http://www.gnu.org/licenses/>. # # # ################################################################################ usage: program [options] options: -h, --help display help message --version display version and exit -v, --verbose verbose logging --interactive interactive mode --text=TEXT text to speak --phonemes=TEXT phonemes to speak --phonemesout translate text to phonemes and display them --datetime read date and time --datetimeloop read date and time in a loop --time read time --timeloop read time in a loop --infile=FILENAME input text filename --outfile=FILENAME output sound filename [default: deep_throat.wav] --savetowavefile save to WAVE file --translatenumbers=BOOL translate numbers to English text [default: true] --analysisvisual visual analysis --analysissound sound analysis """ from __future__ import division import docopt import os import re import struct import sys import time import wave import datavision import propyte import pyaudio import pyprel import shijian name = "deep throat" version = "2017-02-05T0145Z" logo = None def main(options): filename_input = options["--infile"] filename_output = options["--outfile"] text_read = options["--text"] phonemes_read = options["--phonemes"] mode_interactive = options["--interactive"] mode_phonemes_output = options["--phonemesout"] mode_datetime = options["--datetime"] mode_datetimeloop = options["--datetimeloop"] mode_time = options["--time"] mode_timeloop = options["--timeloop"] mode_file_output = options["--savetowavefile"] mode_translate_numbers = options["--translatenumbers"].lower() == "true" mode_analysis_visual = options["--analysisvisual"] mode_analysis_sound = options["--analysissound"] global verbose verbose = options["--verbose"] if text_read is not None and mode_phonemes_output is not True: if verbose: print("\nmode: say text") say( text=text_read, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose, translate_numbers=mode_translate_numbers ) elif text_read is None and phonemes_read is not None: if verbose: print("\nmode: say phonemes") say( phonemes=phonemes_read, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) elif mode_interactive: if verbose: print("\nmode: interactive") logo = pyprel.render_banner( text=name.upper() ) pyprel.print_line() print(pyprel.center_string(text=logo)) pyprel.print_line() print("Ctrl c to exit") while True: try: text = propyte.get_input(prompt=">") say( text=text, save_to_file=mode_file_output, 
filename_output=filename_output, explain=verbose ) except KeyboardInterrupt: sys.exit(1) elif mode_phonemes_output: if verbose: print("\nmode: phonemes output") if text_read is not None: text = text_read elif not sys.stdin.isatty(): input_stream = sys.stdin input_stream_list = [line for line in input_stream] text = " ".join(input_stream_list) else: print("no input text specified") exit() _phonemes = text_to_phonemes( text=text, explain=verbose ) print(_phonemes) elif filename_input is not None: if verbose: print("\nmode: say file") if os.path.isfile(filename_input): file_input = open(filename_input) say( text=file_input.read(), save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) file_input.close() else: print("file {filename} not found".format( filename=filename_input )) elif mode_datetime: if verbose: print("\nmode: say date and time") text = shijian.time_UTC( style="HH hours MM minutes SS sounds day DD month YYYY" ) print(text) say( text=text, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) elif mode_datetimeloop: if verbose: print("\nmode: say date and time in loop") while True: text = shijian.time_UTC( style="HH hours MM minutes SS sounds day DD month YYYY" ) print(text) say( text=text, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) time.sleep(10) elif mode_time: if verbose: print("\nmode: say time in loop") text = shijian.time_UTC( style="HH hours MM minutes SS seconds" ) print(text) say( text=text, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) elif mode_timeloop: if verbose: print("\nmode: say time in loop") while True: text = shijian.time_UTC( style="HH hours MM minutes SS seconds" ) print(text) say( text=text, save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) time.sleep(10) elif not sys.stdin.isatty(): if verbose: print("\nmode: say pipe text") input_stream = sys.stdin input_stream_list = [line for line in input_stream] say( text=" ".join(input_stream_list), save_to_file=mode_file_output, filename_output=filename_output, explain=verbose ) elif mode_analysis_visual: if verbose: print("\nmode: analysis visual") analysis( visual=True, sound=False ) elif mode_analysis_sound: if verbose: print("\nmode: analysis sound") analysis( visual=False, sound=True ) else: if verbose: print("\nmode: no operations specified") logo = pyprel.render_banner( text=name.upper() ) pyprel.print_line() print(pyprel.center_string(text=logo)) pyprel.print_line() print("no operations specified") print(__doc__) ################################################################################ # # # text-to-phonemes rules # # # ################################################################################ rules_English_to_phonemes_special_symbols = { "#": r"[AEIOUY]+", # one or more vowels ".": r"[BVDGJLMNRWZ]", # a voiced consonant; # one of (B, V, D, G, J, L, M, N, R, W, Z) "%": r"(?:ER|E|ES|ED|ING|EL)", # a suffix "&": r"(?:[SCGZXJ]|CH|SH)", # a sibilant "@": r"(?:[TSRDLZNJ]|TH|CH|SH)", # a consonant influencing the sound of a # following long u "^": r"[BCDFGHJKLMNPQRSTVWXZ]", # a single consonant "+": r"[EIY]", # a front vowel; one of (E, I, Y) ":": r"[BCDFGHJKLMNPQRSTVWXZ]*", # zero or more consonants } # Rules are defined in strings in a form easy for humans to read and write. # Rules have the form A/B/C/D. The character string occurring with left context # A and right context C gets the pronunciation D. 
rules_English_to_phonemes = [ # " /// " "A// /UH", "ARE/ / /AH-R", "AR/ /O/UH-R", "AR//#/EH-R", "AS/ ^/#/AE-A-S", "A//WA/UH", "AW///AW", "ANY/ ://EH-N-EE", "A//^+#/AE-A", "ALLY/#://UH-L-EE", "AL/ /#/UH-L", "AGAIN///UH-G-EH-N", "AG/#:/E/IH-J", "A//^+:#/AE", "A/ :/^+/AE-A", "ARR/ //UH-R", "ARR///AE-R", "AR/ ://AH-R", "AR// /AE-R", "AR///AH-R", "AIR///EH-R", "AI///AE-A", "AY///AE-A", "AU///AW", "AL/#:/ /UH-L", "ALS/#:/ /UH-L-Z", "ALK///AW-K", "AL//^/AW-L", "ABLE/ ://AE-A-B-UH-L", "ABLE///UH-B-UH-L", "ANG//+/AE-A-N-J", "ATHE/ C/ /AE-TH-EE", "A//A/AH", "A///AE", "BE/ /^#/B-IH", "BEING///B-EE-IH-N", "BOTH/ / /B-OH-TH", "BUS/ /#/B-IH-Z", "BUIL///B-IH-L", "B/ / /B-EE", "B///B", "CH/ /^/K", "CH/^E//K", "CH///CH", "CI/ S/#/S-AH-EE", "CI//A/SH", "CI//O/SH", "CI//EN/SH", "C//+/S", "CK///K", "COM//%/K-AH-M", "C/ / /S-EE", "C///K", "DED/#:/ /D-IH-D", "D/.E/ /D", "D/#^:E/ /T", "DE/ /^#/D-IH", "DO/ / /D-OO", "DOES/ //D-UH-Z", "DOING/ //D-OO-IH-N", "DOW/ //D-OH", "DU//A/J-OO", "D/ / /D-EE", "DOUGH///D-OH", "D///D", "E/#:/ /", "E/'^:/ /", "E/ :/ /EE", "ED/#/ /D", "E/#:/D /", "ER//EV/EH-V", "EVEN/ EL//EH-V-EH-N", "EVEN/ S//EH-V-EH-N", "E//^%/EE", "E//PH%/EE", "ERI//#/EE-R-EE", "ER/#:/#/AE-R", "ER//#/EH-R", "ER///AE-R", "EVEN/ //EE-V-EH-N", "E/#:/W/", "EW/@//OO", "EW///Y-OO", "E//O/EE", "ES/#:&/ /IH-Z", "E/#:/S /", "ELY/#://L-EE", "EMENT/#://M-EH-N-T", "EFUL///F-U-L", "EE///EE", "EARN///AE-R-N", "EAR/ /^/AE-R", "EAD///EH-D", "EA/#:/ /EE-UH", "EA//SU/EH", "EA///EE", "EIGH///AE-A", "EI///EE", "EYE/ //AH-EE", "EY///EE", "EU///Y-OO", "E/ / /EE", "E/^/ /", "E///EH", "FUL///F-U-L", "F/F//", "F/ / /EH-F", "F///F", "GIV///G-IH-V", "G/ /I^/G", "GE//T/G-EH", "GGES/SU//G-J-EH-SS", "G/G//", "G/ B#//G", "G//+/J", "GREAT///G-R-AE-A-T", "GH/#//", "G/ / /G-EE", "G///G", "HAV/ //H-AE-V", "HERE/ //H-EE-R", "HOUR/ //OH-AE-R", "HOW///H-OH", "H//#/H", "H/ / /H-AE-CH", "H///", "IN/ //IH-N", "I/ / /AH-EE", "IN//D/IH-N", "IER///EE-AE-R", "IED/#:R//EE-D", "IED// /AH-EE-D", "IEN///EE-EH-N", "IE//T/AH-EE-EH", "I/ :/%/AH-EE", "I//%/EE", "IE///EE", "INE/N//AH-EE-N", "IME/T//AH-EE-M", "I//^+:#/IH", "IR//#/AH-EE-R", "IS//%/AH-EE-S", "IX//%/IH-K-S", "IZ//%/AH-EE-Z", "I//D%/AH-EE", "I/+^/^+/IH", "I//T%/AH-EE", "I/#^:/^+/IH", "I//^+/AH-EE", "IR///AE-R", "IGH///AH-EE", "ILD///AH-EE-L-D", "IGN// /AH-EE-N", "IGN//^/AH-EE-N", "IGN//%/AH-EE-N", "IQUE///EE-K", "I///IH", "J/ / /J-A-EE", "J///J", "K//N/", "K/ / /K-A-EE", "K///K", "LO//C#/L-OH", "L/L//", "L/#^:/%/UH-L", "LEAD///L-EE-D", "L/ / /AE-L", "L///L", "MOV///M-OO-V", "M/ / /EH-M", "M///M", "NG/E/+/N-J", "NG//R/N", "NG//#/N", "NGL//%/N-UH-L", "NG///N", "NK///N-K", "NOW/ / /N-OH", "N/ / /EH-N", "N/N//", "N///N", "OF// /UH-V", "OROUGH///AE-R-OH", "OR/ F/TY/OH-R", "OR/#:/ /AE-R", "ORS/#:/ /AE-R-Z", "OR///AW-R", "ONE/ //W-UH-N", "OW//EL/OH", "OW///OH", "OVER/ //OH-V-AE-R", "OV///UH-V", "O//^%/OH", "O//^EN/OH", "O//^I#/OH", "OL//D/OH-L", "OUGHT///AH-T", "OUGH///UH-F", "OU/ /^L/UH", "OU/ //OH", "OU/H/S#/OH", "OUS///UH-S", "OUR/ F//OH-R", "OUR///AW-R", "OUD///U-D", "OUP///OO-P", "OU///OH", "OY///AW-EE", "OING///OH-IH-N", "OI///AW-EE", "OOR///OH-R", "OOK///U-K", "OOD///U-D", "OO///OO", "O//E/OH", "O// /OH", "OA// /OH", "ONLY/ //OH-N-L-EE", "ONCE/ //W-UH-N-S", "ON'T// /OH-N-T", "O/C/N/AH", "O//NG/AH", "O/^:/N/UH", "ON/I//UH-N", "ON/#:/ /UH-N", "ON/#^//UH-N", "O//ST /OH", "OF//^/AW-F", "OTHER///UH-TH-AE-R", "OSS// /AW-S", "OM/#^:/ /UH-M", "O///AH", "PH///F", "PEOP///P-EE-P", "POW///P-OH", "PUT// /P-U-T", "P/ / /P-EE", "P/P//", "P///P", "QUAR///K-W-AW-R", "QU/ //K-W", "QU///K", "Q/ / /K-OO", "Q///K", "RE/ 
/^#/R-EE", "R/ / /AH", "R/R//", "R///R", "SH///SH", "SION/#//ZH-UH-N", "SOME///S-AH-M", "SUR/#/#/ZH-AE-R", "SUR//#/SH-AE-R", "SU/#/#/ZH-OO", "SSU/#/#/SH-OO", "SED/#/ /Z-D", "S/#/#/Z", "SAID///S-EH-D", "SION/^//SH-UH-N", "S/S//", "S/./ /Z", "S/#:.E/ /Z", "S/#^:##/ /Z", "S/#^:#/ /S", "S/U/ /S", "S/ :#/ /Z", "SCH/ //S-K", "S//C+/", "SM/#//Z-M", "SN/#/ /Z-UH-N", "S/ / /EH-S", "S///S", "THE/ / /TH-UH", "TO// /T-OO", "THAT///TH-AE-T", "THIS/ / /TH-IH-S", "THEY/ //TH-AE-A", "THERE/ //TH-EH-R", "THER///TH-AE-R", "THEIR///TH-EH-EH", "THAN/ / /TH-AE-N", "THEM/ / /TH-EH-M", "THESE// /TH-EE-Z", "THEN/ //TH-EH-N", "THROUGH///TH-R-OO", "THOSE///TH-OH-Z", "THOUGH// /TH-OH", "THUS/ //TH-UH-S", "TH///TH", "TED/#:/ /T-IH-D", "TI/S/#N/CH", "TI//O/SH", "TI//A/T", "TIEN///SH-UH-N", "TUR//#/CH-AE-R", "TU//A/CH-OO", "TWO/ //T-OO", "T/ / /T-EE", "T/T//", "T///T", "UN/ /I/Y-OO-N", "UN/ //UH-N", "UPON/ //UH-P-AW-N", "UR/@/#/AE-R", "UR//#/Y-AE-R", "UR///AE-R", "U//^ /UH", "U//^^/UH", "UY///AH-EE", "U/ G/#/", "U/G/%/", "U/G/#/W", "U/#N//Y-OO", "UI/@//OO", "U/@//UH", "U///Y-OO", "VIEW///V-Y-OO", "V/ / /V-EE", "V///V", "WHERE/ //W-AE-R", "WA//S/W-AH", "WA//T/W-AH", "WHERE///WH-EH-R", "WHAT///WH-AH-T", "WHOL///H-OH-L", "WHO///H-OO", "WH///WH", "WAR///W-AH-R", "WOR///W-AE-R", "WR///R", "W/ / /D-AH-B-L-Y-OO", "W///W", "X//^/EH-K-S", "X/ / /EH-K-S", "X/ /#/Z-EH", "X///K-S", "YOUNG///Y-UH-N", "YOU/ //Y-OO", "YES/ //Y-EH-S", "Y/ / /WH-UH-Y", "Y/ //Y", "Y/#^:/ /EE", "Y/#^:/I/EE", "Y/ :/ /AH-EE", "Y/ :/#/AH-EE", "Y/ :/^+:#/IH", "Y/ :/^#/AH-EE", "Y///IH", "ZZ///T-Z", "Z/ / /Z-EH-D", "Z///Z", # numbers "0/ //Z-EE-R-OH", "1/ //W-UH-N", "2/ //T-OO", "3/ //TH-R-EE", "4/ //F-OH-R", "5/ //F-I-V", "6/ //S-IH-K-S", "7/ //S-EH-V-EH-N", "8/ //A-A-T", "9/ //N-I-N", "10/ //T-EH-N", "11/ //EH-L-EH-V-UH-N", "12/ //T-W-EH-L-V", "13/ //TH-IH-R-T-EE-N", "14/ //F-OH-R-T-EE-N", "15/ //F-IH-F-T-EE-N", "16/ //S-IH-K-S-T-EE-N", "17/ //S-EH-V-EH-N-T-EE-N", "18/ //A-A-T-EE-N", "19/ //N-I-N-T-EE-N", "20/ //T-W-EH-N-T-EE", "30/ //TH-U-R-T-EE", "40/ //F-OH-R-T-EE", "50/ //F-IH-F-T-EE", "60/ //S-IH-K-S-T-EE", "70/ //S-EH-V-EH-N-T-EE", "80/ //A-T-EE", "90/ //N-IH-N-T-EE", "HUNDRED/ // H-UH-N-D-R-EH-D", "THOUSAND/ //TH-AH-U-S-EH-N-D", "MILLION/ //M-IH-L-Y-UH-N", # ordinal numbers "FIRST/ //F-U-R-S-T", "SECOND/ //S-EH-K-U-N-D", "THIRD/ //TH-U-R-D ", "FOURTH/ //F-OH-R-TH", "FIFTH/ //F-IH-F-TH", "SIXTH/ //S-IH-K-S-TH", "SEVENTH/ //S-EH-V-EH-N-Th", "EIGHTH/ //A-A-T-TH", "NINTH/ //N-I-N-TH", "TENTH/ //T-EH-N-TH", # letters "A/ //A-EE", "B/ //B-EE", "C/ //S-EE", "D/ //D-EE", "E/ //EE-EE", "F/ //EH-F", "G/ //J-EE", "H/ //A-CH", "I/ //I", "J/ //J-A", "K/ //K-A", "L/ //EH-L", "M/ //EH-M", "N/ //EH-N", "O/ //OH-W", "P/ //P-EE", "Q/ //K-Y-OO", "R/ //AH-R", "S/ //EH-S", "T/ //T-EE", "U/ //Y-OO", "V/ //V-EE", "W/ //D-UH-B-L-Y-OO", "X/ //EH-K-S", "Y/ //W-I", "Z/ //Z-EE", # words "A/ //UH", "ABILITY/ //AE-B-IH-L-IH-T-EE", "ABOARD/ //UH-B-OH-R-D", "ABORT/ //UH-B-OH-R-T", "AFFIRMATIVE/ //AH-F-EH-R-M-AH-T-IH-V", "ALL/ //AW-L", "ALTER/ //AH-L-T-R", "AN/ //AE-N", "AND/ //AE-N-D", "ANDY/ //AE-N-D-EE", "ANY/ //EH-N-EE", "ANYBODY/ //AE-N-EE-B-AH-D-EE", "AT/ //AE-T", "ATTACKED/ //UH-T-AE-K-T", "BACKUP/ //B-AH-K-UH-P", "BASIC/ //B-A-S-IH-K", "BAUD/ //B-AW-D", "BE/ //B-EE", "BEGIN/ //B-EE-G-IH-N", "BOOT/ //B-OO-T", "BOSS/ //B-OH-S", "BREAK/ //B-R-A-K", "BUG/ //B-UH-G", "BY/ //B-I", "CALL/ //K-AW-L", "CALLING/ //K-AW-L-IH-N-G", "CAPABLE/ //K-A-P-UH-B-UH-L", "CHARLIE/ //CH-AH-R-L-EE", "CITY/ //S-IH-T-EE", "COLD/ //K-OH-L-D", "COMBINATIONS/ //K-AH-M-B-IH-N-A-SH-UH-N-S", "COMES/ 
//K-UH-M-S", "COMMAND/ //K-UH-M-AH-N-D", "COMPUTER/ //K-AH-M-P-Y-OO-T-OH-R", "CONSIDER/ //K-UH-N-S-IH-D-R", "CONTINUE/ //K-UH-N-T-IH-N-Y-OO", "COPYRIGHT/ //K-AH-P-EE-R-I-T", "CRASH/ //K-R-AH-SH", "CREATE/ //K-R-EE-A-T", "DANCING/ //D-AE-N-S-IH-N-G", "DECEMBER/ //D-EE-S-EH-M-B-UH-R", "DIFFERENT/ //D-IH-F-UH-R-EH-N-T", "DISK/ //D-IH-S-K", "DOG/ //D-AW-G", "DOG/ //D-OH-G", "DOING/ //D-OO-IH-N-G", "DONE/ //D-UH-N", "DOS/ //d-aw-s", "DOWN/ //D-AH-W-N", "DRIVE/ //D-R-AE-V", "EATING/ //EE-T-IH-N-G", "EMERGENCY/ //EE-M-R-J-EH-N-S-EE", "ENTER/ //EH-N-T-R", "ERROR/ //EH-R-UH-R", "ESCAPE/ //EH-S-K-A-P", "EXIT/ //EH-K-S-IH-T", "FALLS/ //F-AH-L-L-S", "FAST/ //F-AH-S-T", "FATAL/ //F-A-T-L", "FAULT/ //F-AW-L-T", "FIXED/ //F-IH-K-S-T", "FLOPPY/ //F-L-OH-P-EE", "FOLLOWING/ //F-AH-L-OH-W-IH-N-G", "FORAY/ //F-OH-R-A-EE", "GENTLY/ //J-EH-N-T-L-EE", "GIVES/ //G-IH-V-S", "HARD/ //H-AW-R-D", "HAS/ //H-AE-S", "HELLO/ //H-EH-L-OH-W", "HENDERSON/ //H-EH-N-D-R-S-UH-N", "HERE/ //H-EE-R", "HIM/ //H-IH-M", "HIS/ //H-IH-S", "HIT/ //H-IH-T", "HOT/ //H-AH-T", "HOW/ //H-AW-W", "HUNDRED/ //H-UH-N-D-R-EH-D", "IGNORE/ //IH-G-N-OH-R", "IN/ //IH-N", "INFORMATION/ //IH-N-F-AW-R-M-EY-SH-UH-N", "INSERT/ //IH-N-S-UH-R-T", "INTERFACE/ //IH-N-T-R-F-AH-Y-S", "IS/ //IH-Z", "IT/ //IH-T", "JUST/ //J-UH-S-T", "KEY/ //K-EE", "KILLER/ //K-IH-L-R", "KLUDGE/ //K-L-OO-D-ZH", "LIKE/ //L-I-K", "LIMITLESS/ //L-IH-M-IH-T-L-EH-S-S", "LOOK/ //L-U-K", "MAIL/ //M-A-UH-L", "MAN/ //M-AE-N", "MCGUIRE/ //M-IH-KG-W-I-R", "MEMORY/ //M-EH-M-OH-R-EE", "MISS/ //M-IH-S", "MODEM/ //M-OH-D-IH-M", "MOON/ //M-OO-N", "MUCH/ //M-UH-CH", "MY/ //M-I", "NAME/ //N-A-M", "NO/ //N-OH", "OF/ //UH-F", "ON/ //AH-N", "OR/ //AW-R", "OUT/ //AH-W-T", "OUT/ //AW-T", "OUTSIDE/ //AW-T-S-AH-Y-D", "PARALLEL/ //P-AH-R-UH-L-EH-L", "PHONE/ //F-OH-N", "PHONEME/ //F-OH-N-EH-M", "PHONEMES/ //F-OH-N-EH-M-S", "PLAIN/ //P-L-A-IH-N", "PORT/ //P-OH-R-T", "PRESS/ //P-R-EH-S", "PRINTER/ //P-R-IH-N-T-R", "PROGRAM/ //P-R-OH-G-R-AE-M", "RAIN/ //R-A-IH-N", "RAM/ //R-AH-M", "READY/ //R-EH-D-EE", "RECOVER/ //R-EE-K-UH-V-R", "REMOTE/ //R-EH-M-OH-T", "REMOVE/ //R-EE-M-U-V", "REPLACE/ //R-EE-P-L-A-S", "REPRODUCING/ //R-EE-P-R-OH-D-OO-S-IH-N-G", "RESERVED/ //R-EE-S-U-R-V-D", "RESTORE/ //R-EE-S-T-OH-R", "RETRY/ //R-EE-T-R-I", "RIGHTS/ //R-I-T-S", "SEADOG/ //S-EE-D-OH-G", "SECOND/ //S-EH-K-UH-N-D", "SEE/ //S-EE", "SEEN/ //S-EE-N", "SENTENCE/ //S-EH-N-T-EH-N-S", "SERIAL/ //S-IH-R-EE-AH-L", "SERVICE/ //S-UH-R-V-EH-S", "SEX/ //S-EH-K-S", "SHALL/ //SH-AH-L", "SHITFACED/ //S-H-IH-T-F-A-S-D", "SIDE/ //S-AH-Y-D", "SLOW/ //S-L-OH-W", "SOMETHING/ //S-UH-M-TH-IH-N-G", "SOUND/ //S-AH-W-N-D", "SPAIN/ //S-P-A-IH-N", "SPEECH/ //S-P-EE-CH", "START/ //S-T-AH-R-T", "STARTING/ //S-T-AH-R-T-IH-N-G", "SYSOP/ //S-IH-S-AH-P", "TALK/ //T-AW-K", "TAPE/ //T-A-P", "TELEPHONE/ //T-EH-L-UH-F-OH-N", "TEST/ //T-EH-S-T-T", "THE/ //TZ-UH", "THESE/ //TH-EE-S", "THIS/ //TH-IH-S", "THOM/ //T-AH-M", "TO/ //T-OO", "TOMATO/ //T-UH-M-A-T-OH", "TOO/ //T-OO", "TRY/ //T-R-I", "UNIT/ //Y-OO-N-IH-T", "US/ //UH-S", "USER/ //Y-OO-S-R", "USING/ //Y-OO-S-IH-N-G", "USING/ //Y-OO-Z-IH-N-G", "VERY/ //V-AE-R-E", "VOCABULARY/ //V-OH-K-AE-B-Y-OO-L-AE-R-EE", "WARM/ //W-AH-R-M", "WE/ //W-EE", "WELCOME/ //W-EH-L-K-UH-M", "WORLD/ //W-ER-L-D", "WORKING/ //W-UH-R-K-IH-N-G", "WOULD/ //W-U-D", "YES/ //Y-EH-S", "YOUR/ //Y-OH-R" ] ################################################################################ # # # phonemes data # # # ################################################################################ phonemes_dictionary = { "space": ( 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ), "U": ( 0x3F, 0xFF, 0xFF, 0xF0, 0xFC, 0xFE, 0xFC, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x80, 0x00, 0x01, 0xFF, 0xFC, 0x00, 
0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x1C, 0x1F, 0xFF, 0xFF, 0xF1, 0xFC, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0xFC, 0xFF, 0x7C, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x84, 0x00, 0x01, 0xFF, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x00, 0x1F, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x03, 0x83, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xE0, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x3F, 0x9F, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xC0, 0xC0, 0x00, 0xFF, 0xFE, 0x00, 0x00, 0x47, 0xFF, 0xFF, 0xFE, 0x00, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0xC0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFC, 0x3F, 0x3F, 0x06, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0x07, 0x00, 0x07, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF0, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0x9F, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x80, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x30, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xE0, 0x00, 0x1F, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x7F, 0x80, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xF8, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFE, 0x03, 0xF0, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xE1, 0xC0, 0x7F, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x0F, 0xFF, 0xFF, 0xFF, 0xFC, 0x3F, 0x80, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFC, 0xFE, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFE, 0x3C, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xE0, 0x00, 0x0F, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x3C, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE1, 0xFC, 0xFC, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE0, 0x03, 0xE7, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xF8, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x87, 0xE3, 0xE0, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x3E, 0x3F, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x03, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0x0F, 0xE0, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x1F, 0x87, 0x80, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0, 0x78, 0x3F, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0xF0, 0x1F, 0xF3, 0xC0, 0x00, 0x00, 0x00, 0x07, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xC0, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x3F, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0, 0xF0, 0x7E, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x1F, 0xCF, 0x80, 0x00, 0x00, 0x00, 0x07, 0x0F, 0xFF, 0xFF, 0xFF, 0xFE, 0x3F, 0xC0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFC, 0x7E, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x01, 0xE0, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x7F, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x70, 0xFF, 0xFF, 0xFF, 0xFF, 0xE1, 0xFE, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC3, 0xF0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF8, 0x0F, 0x0F, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x01, 0xC1, 0xFF, 0xFF, 0xFF, 0xFF, 0x87, 0xF8, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x0F, 0xE3, 0xC0, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF0, 0x40, 0x00, 0x7F, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0x00 ), "A": ( 0xF0, 0x00, 0xFC, 0x03, 0xFF, 0x83, 0xFF, 0xF8, 0x00, 0x7E, 0x00, 0x1F, 0xC0, 0x07, 0xFF, 0x80, 0x1F, 0xE0, 0x07, 0xF0, 0x01, 0xFF, 0x03, 0xFF, 0xFF, 0x7F, 0xFF, 0x03, 0xFF, 0xE0, 0xC0, 0x3F, 0x00, 0x0F, 0xE0, 
0x3F, 0xF8, 0x3F, 0xFF, 0x80, 0x01, 0xF8, 0x00, 0xFE, 0x03, 0xFF, 0xC3, 0xFF, 0xF8, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x03, 0xF8, 0x77, 0xE7, 0xF0, 0x01, 0xF8, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0x67, 0xFF, 0x80, 0xFF, 0xFC, 0x7C, 0x07, 0xE0, 0x01, 0xFC, 0x07, 0xFF, 0x07, 0xF3, 0xF8, 0x00, 0x3F, 0x00, 0x1F, 0x80, 0x7F, 0xF8, 0x7E, 0xFF, 0x00, 0x07, 0xC0, 0x01, 0xF8, 0x00, 0xFF, 0x0F, 0xF0, 0xFF, 0x00, 0x3F, 0x00, 0x3F, 0xC0, 0x3F, 0xFF, 0xCF, 0xFF, 0xE0, 0x1F, 0xFF, 0x3F, 0x80, 0xFC, 0x00, 0x3F, 0x80, 0xFF, 0xE0, 0xFE, 0x7F, 0x00, 0x07, 0xE0, 0x01, 0xF8, 0x0F, 0xFF, 0x07, 0xEF, 0xF3, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0x0F, 0xE0, 0x7F, 0x8F, 0xE0, 0x03, 0xF0, 0x01, 0xFC, 0x03, 0xFF, 0xFF, 0xCF, 0xFE, 0x00, 0xFF, 0xE3, 0xFF, 0x00, 0xFC, 0x00, 0x7F, 0x00, 0xFF, 0xC0, 0xFC, 0x7F, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x1F, 0xFE, 0x1F, 0x8F, 0xFE, 0x01, 0xF8, 0x00, 0x7E, 0x00, 0x3F, 0xC0, 0x7F, 0x1F, 0x00, 0x03, 0xC0, 0x03, 0xF8, 0x0F, 0xFF, 0xF0, 0xFF, 0xFC, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0xF8, 0x00, 0x7F, 0x00, 0xFF, 0xC1, 0xFC, 0x7F, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x1F, 0xFE, 0x1F, 0x0F, 0xE6, 0x00, 0xF8, 0x00, 0x7F, 0x00, 0x1F, 0xFF, 0x8C, 0x0F, 0xE0, 0x07, 0xF0, 0x03, 0xFC, 0x07, 0xFF, 0xE0, 0x0F, 0xFC, 0x00, 0xFF, 0x01, 0xFF, 0x80, 0x7E, 0x00, 0x1F, 0x80, 0xFF, 0xE0, 0xFE, 0x3F, 0x00, 0x07, 0xE0, 0x01, 0xF8, 0x0F, 0xFF, 0x0F, 0xC7, 0xFB, 0x00, 0xFC, 0x00, 0x1F, 0x00, 0x0F, 0xF0, 0x7F, 0x8F, 0xC0, 0x01, 0xE0, 0x01, 0xFE, 0x03, 0xFF, 0xFE, 0xFF, 0xFF, 0x00, 0xFF, 0xE7, 0xFF, 0xE0, 0x1F, 0x80, 0x07, 0xF0, 0x1F, 0xFC, 0x1F, 0xC7, 0xE0, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0xFF, 0xE1, 0xF0, 0xFE, 0x00, 0x0F, 0x80, 0x03, 0xF0, 0x01, 0xFC, 0x0F, 0xE0, 0x3E, 0x00, 0x7F, 0x00, 0x7F, 0xC0, 0x7F, 0xFF, 0xF7, 0xFF, 0xC0, 0x1F, 0xFC, 0x7F, 0xF0, 0x0F, 0xC0, 0x03, 0xF8, 0x0F, 0xFE, 0x0F, 0xC7, 0xF0, 0x00, 0x7E, 0x00, 0x3F, 0x00, 0xFF, 0xF0, 0xFC, 0xFF, 0x00, 0x0F, 0x80, 0x03, 0xF0, 0x01, 0xFE, 0x1F, 0xC0, 0xFE, 0x00, 0x3F, 0x00, 0x3F, 0xC0, 0x7F, 0xFF, 0xCF, 0xFF, 0xE0, 0x07, 0xFE, 0x3F, 0xFC, 0x07, 0xE0, 0x01, 0xFC, 0x07, 0xFF, 0x07, 0xF1, 0xF8, 0x00, 0x3F, 0x00, 0x1F, 0x80, 0x7F, 0xF8, 0x7E, 0x7F, 0x00, 0x07, 0xC0, 0x01, 0xF8, 0x00, 0xFF, 0xCC, 0x03, 0xF8, 0x00, 0xFE, 0x00, 0xFF, 0x80, 0x7F, 0xFF, 0x01, 0xFF, 0x00, 0x3F, 0xF3, 0xFF, 0xF0, 0x0F, 0xC0, 0x07, 0xF0, 0x0F, 0xFC, 0x0F, 0xE3, 0xF0, 0x00, 0x7E, 0x00, 0x3F, 0x00, 0xFF, 0xF0, 0xFC, 0xFF, 0x00, 0x0F, 0x80, 0x03, 0xF0, 0x01, 0xFC, 0x1F, 0xE0, 0x38, 0x00, 0x1E, 0x00, 0x3F, 0xF0, 0xFF, 0xFF, 0xFD, 0x9F, 0xC0, 0x07, 0xF1, 0xFF, 0xFC, 0x01, 0xF8, 0x00, 0xFE, 0x01, 0xFF, 0x81, 0xFD, 0xFE, 0x00, 0x1F, 0x80, 0x07, 0xE0, 0x1F, 0xFC, 0x3F, 0x3F, 0xC0, 0x01, 0xE0, 0x00, 0x7E, 0x00, 0x3F, 0x09, 0xF8, 0x1F, 0xC0, 0x07, 0xC0, 0x0F, 0xFC, 0x1F, 0xFF, 0xF1, 0xFF, 0xFC, 0x01, 0xFF, 0xFF, 0xF8, 0x07, 0xE0, 0x03, 0xF8, 0x07, 0xFE, 0x07, 0xF1, 0xF8, 0x00, 0x3F, 0x00, 0x3F, 0x80, 0xFF, 0xF8, 0x7F, 0xFF, 0x00, 0x07, 0xC0, 0x01, 0xF8, 0x00, 0xFE, 0x1F, 0xC0, 0x7C, 0x00, 0x0F, 0x00, 0x1F, 0xC0, 0xFF, 0xFF, 0xE1, 0xFF, 0xE0, 0x0F, 0xFB, 0xFF, 0xF8, 0x07, 0xE0, 0x01, 0xFC, 0x07, 0xFF, 0x07, 0xF3, 0xF8, 0x00, 0x7E, 0x00, 0x1F, 0x80, 0xFF, 0xF0, 0xFC, 0x7F, 0x00, 0x07, 0x80, 0x01, 0xF8, 0x00, 0xFF, 0x03, 0xE0, 0x08, 0x00, 0x07, 0x00, 0x1F, 0xF8, 0x3F, 0xFF, 0xE7, 0xFF, 0xE0, 0x0F, 0xFF, 0xFF ), "B": ( 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0x98, 0x38, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x38, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xE0, 0x0F, 0xFF, 0x0F, 0x80, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 
0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xE7, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xC0, 0xF8, 0x0F, 0x1F, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xE0, 0x07, 0x8F, 0xFF, 0xF0, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xE0, 0xF8, 0x0F, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x10, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x0F, 0x80, 0x60, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x1F, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x03, 0x80, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x80, 0x7F, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFE, 0x07, 0x00, 0x20, 0x1C, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x0F, 0x9F, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x01, 0xE1, 0xFF, 0xFF, 0x03, 0x80, 0x38, 0x0F, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xDF, 0xFF, 0xF3, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x3C, 0x3F, 0xFF, 0xF0, 0x78, 0x03, 0x00, 0xE0, 0x00, 0x03, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xBF, 0xFF, 0xF8, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xE7, 0x80, 0x10, 0x70, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x07, 0xC7, 0xFF, 0xFF, 0x0F, 0x80, 0x00, 0x07, 0x03, 0xC7, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF9, 0xFC, 0x0E, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x9F, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xF0, 0x0C, 0x01, 0xF0, 0xFF, 0xFF, 0x83, 0xE0, 0x3C, 0x07, 0x80, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0xC7, 0xF0, 0xFC, 0x7F, 0xFF, 0xE0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xE0, 0xC0, 0x01, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x1F, 0xF3, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x18, 0x00, 0x40, 0x3F, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x1F, 0x9C, 0x20, 0x00, 0x08, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x8C, 0x20, 0x00, 0x00, 0x00, 0x00, 0x18, 0x66, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xFF, 0xFE, 0xF8, 0x00, 0x60, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x04, 0x06, 0x80, 0x00, 0x00, 0x00, 0x71, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x38, 0x00, 0x00, 0x00, 0x10, 0x30, 0x01, 0x00, 0x0E, 0x0C, 0x10, 0x40, 0x63, 0x04, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x21, 0xE0, 0x00, 0x00, 0x00, 0x30, 0x06, 0x00, 0x62, 0x00, 0x00, 0x00, 0x00 ), "D": ( 0xC1, 0xE0, 0xFC, 0xF3, 0x9F, 0xCF, 0x19, 0xC7, 0xE1, 0x9C, 0x39, 0x8E, 0x38, 0x1C, 0xE7, 0x3F, 0x63, 0xB8, 0x71, 0xE1, 0xE1, 0xC0, 0x7E, 0x1C, 0x47, 0x01, 0xC7, 0x70, 0x1E, 0x1B, 0xC3, 0xC0, 0x38, 0x1F, 0x18, 0xE0, 0xF1, 0xF0, 0xFC, 0x00, 0xE0, 0xE0, 0x78, 0x3C, 0xE3, 0x1E, 0x3C, 0xC0, 0xE1, 0xC4, 0xE3, 0x98, 0x78, 0xE1, 0xFC, 0x78, 0x0F, 0xC7, 0x1C, 0x71, 0xC6, 0x38, 0xE7, 0x87, 0x3C, 0x38, 0xE3, 
0xF8, 0x1F, 0x01, 0xF0, 0x7C, 0x1F, 0x1C, 0xF0, 0x0F, 0x87, 0x00, 0xE0, 0x00, 0x1C, 0x07, 0x03, 0xF0, 0x7F, 0x1F, 0xE3, 0xF3, 0xC7, 0x18, 0xF8, 0x03, 0x20, 0x1C, 0x1F, 0x00, 0x70, 0x83, 0xC3, 0x80, 0xFC, 0x47, 0xFF, 0xCF, 0xFD, 0xFF, 0xFC, 0x77, 0xE0, 0x00, 0x00, 0x0F, 0x00, 0x40, 0x78, 0x1F, 0x81, 0xFF, 0xFC, 0x7F, 0x8F, 0xFF, 0xE0, 0x1E, 0x04, 0x00, 0x00, 0x3F, 0x00, 0x01, 0x01, 0xFC, 0x01, 0xFF, 0xE3, 0xF0, 0x7F, 0xFF, 0xC7, 0xFF, 0xFF, 0xFF, 0x8F, 0xF0, 0x00, 0x01, 0xE0, 0x0E, 0x01, 0xFF, 0xFC, 0x7F, 0xFF, 0xCF, 0xE0, 0x3E, 0x00, 0x00, 0x00, 0x3C, 0x3E, 0x0F, 0xFF, 0xFE, 0x7E, 0x00, 0x00, 0x00, 0x08, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x01, 0xF0, 0x0F, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x7E, 0xFF, 0xFF, 0xFF, 0xF8, 0x04, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x03, 0xE0, 0x1F, 0x00, 0x3F, 0xFF, 0xFF, 0xFE, 0x0F, 0xE0, 0x00, 0x00, 0x3F, 0xE0, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x0E, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0xE0, 0x07, 0xE0, 0x1F, 0xFF, 0xFF, 0xF0, 0x01, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x01, 0xE0, 0x7F, 0xFF, 0xFF, 0x80, 0x3C, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x7F, 0xC0, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x80, 0x70, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x1F, 0xFE, 0x1F, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x01, 0xFE, 0x1F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x3F, 0xF0, 0x07, 0x81, 0xFE, 0x00, 0x08, 0x03, 0xFF, 0x81, 0xFF, 0x7F, 0xFC, 0x00, 0x00, 0x1F, 0xFF, 0x01, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x1E, 0x00, 0x3F, 0x07, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xF0, 0x3F, 0x8F, 0xFF, 0xE0, 0x00, 0x00, 0xC1, 0xF0, 0x1F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x73, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x38, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xF0, 0x7F, 0x8F, 0xFF, 0x00, 0x00, 0x01, 0xE0, 0xF8, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0x1F, 0xF1, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x01, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x30, 0x1F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x0E, 0x00, 0x7F, 0xFF, 0xFF, 0xEF, 0xBF, 0xFC, 0x00, 0x00, 0x1F, 0xFF, 0x1F, 0xE0, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xC7, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF3, 0xFF, 0xFC, 0x00, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x03, 0xCF, 0xF0, 0x7F, 0x07, 0xFF ), "G": ( 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xF0, 0x00, 0xFF, 0xC0, 0x00, 0x0F, 0xF8, 0x00, 0xFF, 0xF8, 0x0F, 0xFF, 0x00, 0x3F, 0xFE, 0x1E, 0x07, 0xEF, 0xC0, 0xF0, 0xFC, 0x18, 0x00, 0xFF, 0xE0, 0x0F, 0xFC, 0x00, 0x1F, 0xE0, 0x07, 0xE1, 0x0F, 0xFC, 0x60, 0x07, 0x83, 0xF0, 0x00, 0x7F, 0x01, 0x37, 0xEE, 0x70, 0x7C, 0x00, 0x0F, 0xFF, 0xE1, 0xE0, 0x3F, 0xFF, 0x02, 0x1F, 0xE0, 0x01, 0xFC, 0x0E, 0x02, 0x00, 0xFF, 0xF8, 0x0F, 0xFF, 0x01, 0xF0, 0x00, 0x7E, 
0x40, 0x7F, 0xC0, 0x1F, 0xF8, 0x01, 0xFF, 0xC0, 0xEF, 0xF8, 0x38, 0x7F, 0xFE, 0x0F, 0x00, 0xC0, 0xE0, 0x3E, 0x3F, 0x1F, 0xFF, 0x07, 0xF0, 0x00, 0x3F, 0xF0, 0x07, 0xFF, 0x0F, 0xFC, 0xFF, 0xFF, 0x03, 0x00, 0x00, 0x01, 0xE0, 0x3F, 0xFC, 0x03, 0xFF, 0x00, 0x1F, 0xE0, 0x00, 0xFC, 0x00, 0x1F, 0x86, 0x01, 0xF9, 0xC0, 0x08, 0x07, 0x0F, 0xC1, 0xE3, 0xFF, 0xF8, 0xFF, 0xF0, 0x03, 0xC0, 0x80, 0x00, 0x00, 0x06, 0x00, 0x00, 0x0F, 0x00, 0x03, 0xFC, 0x3F, 0xFF, 0xFF, 0xFF, 0x81, 0xFE, 0x18, 0x00, 0x80, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x7C, 0x7F, 0xFF, 0xFF, 0xF8, 0xFF, 0xFF, 0x9F, 0x3F, 0xFF, 0xF0, 0x08, 0x07, 0x80, 0x00, 0x7C, 0x00, 0x07, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF2, 0x00, 0x00, 0x01, 0xC0, 0x00, 0x0F, 0x00, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x7C, 0x00, 0x00, 0x00, 0x00, 0x70, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x81, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x7E, 0x07, 0xFF, 0xFF, 0xFF, 0xF8, 0x7F, 0x80, 0x00, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xE0, 0xFF, 0xFF, 0xFF, 0xF0, 0x3F, 0xC0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x78, 0x00, 0x01, 0xF0, 0x7F, 0xFF, 0xFF, 0xFF, 0xC3, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x70, 0x00, 0x00, 0xF1, 0xFF, 0xFF, 0xFF, 0xFF, 0x81, 0xE0, 0x00, 0x00, 0x00, 0x1F, 0x81, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x0F, 0x3F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFE, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x3E, 0x01, 0xFF, 0xFF, 0xFF, 0xF8, 0x3F, 0x80, 0x60, 0x00, 0x03, 0xF8, 0x1F, 0xFD, 0xFF, 0xFF, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x1F, 0x00, 0x00, 0x00, 0x03, 0xFE, 0x3F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xC0, 0x80, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x06, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE0, 0x3E, 0x00, 0x00, 0x00, 0x3F, 0xFE, 0x7F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFE, 0x00, 0x01, 0x80, 0x1F, 0xF0, 0x7F, 0xFF, 0xFF, 0xF0, 0x0E, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xBF, 0xE1, 0xFF, 0x80, 0x00, 0x00, 0x01, 0xF0, 0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0x87, 0xCF, 0x98, 0x0C, 0x00, 0xFF, 0xFB, 0xFF, 0xFF, 0x00, 0xE0, 0x00, 0x78, 0x07, 0xE7, 0xFC, 0xFE, 0x07, 0x80, 0x00, 0x00, 0x0F, 0x0F, 0xFF, 0xFC, 0x3F, 0xF0, 0x01, 0x80, 0x00 ), "J": ( 0x1F, 0xC0, 0x3F, 0x03, 0xFC, 0x03, 0xF0, 0x1F, 0x07, 0xC0, 0xFF, 0x83, 0xFC, 0x0F, 0xF8, 0x3F, 0xC0, 0x7F, 0x03, 0xC0, 0xF0, 0x0F, 0xF0, 0x7F, 0x83, 0x1F, 0x01, 0xF0, 0x07, 0xC1, 0x9F, 0x80, 0xF8, 0x73, 0xC0, 0x3F, 0xC3, 0xE2, 0x0F, 0xF8, 0x3F, 0x83, 0xC7, 0x1C, 0x70, 0xF8, 0xF0, 0xF8, 0xE0, 0xFE, 0x0F, 0x00, 0xFF, 0x07, 0xC3, 0xC7, 0x00, 0xEF, 0x0F, 0x1F, 0x0E, 0x3E, 0x11, 0xE1, 0xFE, 0x0F, 0x0E, 0x3F, 0xC1, 0xF0, 0xF8, 0x07, 0x81, 0xF0, 0x63, 0xF0, 0x3F, 0x1E, 0x0F, 0xF0, 0xFF, 0x0F, 0x1E, 0x03, 0xE0, 0x3F, 0x01, 0xE0, 0x7C, 0x03, 0xF0, 0x1F, 0x00, 0x1F, 0x07, 0xF0, 0x3C, 0x3E, 0x07, 
0xF8, 0x3F, 0xE1, 0xF0, 0x1C, 0x63, 0xF8, 0x0F, 0xE0, 0x1F, 0x07, 0x01, 0xE1, 0xE1, 0xF8, 0x1F, 0x80, 0x7F, 0xC0, 0xFC, 0x1F, 0x0F, 0x03, 0xF8, 0x40, 0xFF, 0xE0, 0x7C, 0x1F, 0x03, 0xC1, 0xE3, 0x70, 0x43, 0x8F, 0x0F, 0xF8, 0x3F, 0x80, 0xF7, 0x81, 0xF0, 0x3F, 0x0F, 0x00, 0xC7, 0xC0, 0xFE, 0x04, 0x78, 0x78, 0x0F, 0xC0, 0x1F, 0x03, 0xF8, 0x78, 0xF0, 0xF8, 0x3E, 0x1C, 0x3F, 0x83, 0xE0, 0xC7, 0x07, 0x87, 0x81, 0xF8, 0x3F, 0x87, 0x01, 0xC3, 0xC0, 0xFF, 0x03, 0xDE, 0x00, 0xFC, 0x0F, 0x1C, 0x3E, 0x1E, 0x0F, 0x0F, 0x80, 0xFC, 0x00, 0xF1, 0xC3, 0xC3, 0xCF, 0x07, 0x86, 0x0F, 0x80, 0xFE, 0x10, 0x3C, 0x78, 0x71, 0xE0, 0x0F, 0x9C, 0x07, 0xF0, 0x7C, 0x07, 0x81, 0xE1, 0xF0, 0xFC, 0x3E, 0x03, 0xF0, 0xFC, 0xF8, 0xF8, 0x1F, 0xF0, 0x3C, 0x06, 0x7C, 0x07, 0x80, 0x00, 0x00, 0xF8, 0x3F, 0x00, 0xFF, 0x81, 0xFF, 0x8F, 0x00, 0xF8, 0x1C, 0x00, 0x07, 0x02, 0x01, 0xFC, 0x81, 0xFF, 0xFC, 0x3F, 0xFC, 0xFF, 0xE7, 0xF9, 0xC7, 0xC3, 0x80, 0x80, 0x78, 0x00, 0xC0, 0x00, 0x01, 0xFF, 0x87, 0xF0, 0xFF, 0x3F, 0x1F, 0xFF, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x01, 0xC0, 0x00, 0x08, 0x00, 0xC3, 0xF8, 0x3F, 0x9F, 0xFF, 0xFF, 0xFF, 0x3C, 0x00, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFC, 0x00, 0x01, 0xE0, 0x00, 0x0F, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x1F, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x1F, 0xFE, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x7F, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x07, 0xC0, 0x00, 0x00, 0x00, 0x0F, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x3E, 0x00, 0x01, 0xF8, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x3E, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0x00, 0x00, 0x7E, 0xC0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x00, 0x00, 0x78, 0xF8, 0x7F, 0xFF, 0xFF, 0xFC, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7E, 0x3F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0xC0, 0x00, 0x03, 0xC0, 0x00, 0x7F, 0xFF, 0xFF, 0xC3, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xC0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x80, 0x00, 0xF8, 0x7E, 0x3F, 0x07, 0xFF, 0xC0, 0x30, 0x0E, 0x00, 0x00, 0x3F, 0xFE, 0x0F, 0xFF, 0xFF, 0xF0, 0x00, 0x1C, 0x00, 0x00, 0x38, 0x7C, 0x1F, 0xFF, 0x07, 0x9F, 0x00, 0xF0, 0x00 ), "P": ( 0x01, 0x83, 0xF8, 0x00, 0x1F, 0x7F, 0xCF, 0x3F, 0x80, 0xC0, 0x01, 0xFF, 0xC0, 0x00, 0x78, 0x03, 0xF8, 0x38, 0xC0, 0x04, 0x00, 0x61, 0x00, 0x00, 0x00, 0x00, 0x78, 0xBF, 0xF8, 0x8F, 0x8F, 0xFF, 0xFE, 0x7F, 0xFF, 0xFB, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x00, 
0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x3F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFC, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFE, 0x03, 0xFF, 0xFF, 0xFF, 0xFC, 0x03, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x0F, 0xFF, 0xE0, 0x00, 0x00, 0x47, 0xFC, 0x3E, 0x0E, 0x00, 0x00, 0x00, 0x63, 0xFF, 0xFC, 0x00, 0x00, 0x78, 0x3E, 0x00, 0xE1, 0xC3, 0x00, 0x00, 0x3C, 0x03, 0xFF, 0xF0, 0xC0, 0x03, 0xFF, 0xFF, 0xFE, 0x07, 0xC0, 0x1F, 0xFF, 0x1F, 0xE0, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x10, 0xFF, 0xE0, 0x60, 0x18, 0x03, 0xFC, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x01, 0xFC, 0x00, 0x1F, 0x01, 0xFE, 0x3F, 0xFF, 0xFF, 0xF8, 0x0F, 0x00, 0xFF, 0xFF, 0xFF, 0x81, 0xE0, 0x00, 0xFE, 0x08, 0x00, 0x00, 0x00, 0xFF, 0x9F, 0xC0, 0x00, 0x00, 0x00, 0x73, 0xFF, 0xF8, 0xE0, 0x18, 0x10, 0x00, 0x3C, 0x03, 0xC7, 0xCF, 0xF1, 0xFF, 0xFF, 0xFF, 0xFC, 0xE0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xC7, 0xFF, 0xF0, 0x00, 0x00, 0x1C, 0x7F, 0x7F, 0xC0, 0xC3, 0x80, 0xFC, 0x00, 0x7F, 0xF9, 0xE2, 0x01, 0xF0, 0xFF, 0xFF, 0xF1, 0xFC, 0x7F, 0xFC, 0x3F, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xC0, 0x80, 0x00, 0x00, 0x00, 0xFC, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x07, 0xF3, 0xFE, 0x00, 0xE7, 0xFF, 0xFF, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xFF, 0xFF, 0xF0, 0x00, 0x02, 0x03, 0xE0, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x07, 0x0F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x71, 0x70, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFC, 0x7F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0xF8, 0x03, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x3F, 0x30, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x07, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF0, 0x01, 0x0F, 0xFF, 0xFF, 0xFF, 0xF3, 0xC0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x03 ), "T": ( 0x1F, 0x03, 0xC1, 0xE0, 0xF8, 0x07, 0xC1, 0xE0, 0xF0, 0x7C, 0x1F, 0x04, 0x3F, 0x1C, 0xE6, 0x1C, 0x86, 0x71, 0xC1, 0xF8, 0x01, 0xF8, 0x87, 0x1F, 0x87, 0x81, 0xF0, 0xF0, 0x71, 0x1F, 0x8F, 0x1E, 0xE3, 0xE7, 0x33, 0x1C, 0xC7, 0x3B, 0x98, 0x39, 0xC9, 0x9C, 0x78, 0xE7, 0x39, 0xC7, 0x67, 0x27, 0xC6, 0x3A, 0x39, 0x99, 0xB8, 0xF0, 0x78, 0xF3, 0x83, 0x8E, 0xE7, 0x71, 0xCC, 0x79, 0x1F, 0x1F, 0x1F, 0x0F, 0x03, 0x89, 0xE3, 0xD8, 0xF0, 0x78, 0xFC, 0x0E, 0x39, 0xCC, 0xE0, 0xEE, 0x73, 0x1C, 0x72, 0x31, 0x99, 0x8F, 0x1E, 0x71, 0xF0, 0x78, 0x78, 0xCF, 0x30, 0x78, 0x71, 0x9C, 0x3C, 0xCE, 0x7F, 0x04, 0xE0, 0xF0, 0x9E, 0x0F, 0x03, 0xC3, 0xE1, 0xF8, 0x8E, 0x63, 0x8E, 0x1F, 0x8E, 0x39, 0x8E, 0x0F, 0x07, 0x87, 0x1C, 0x0E, 0x3F, 0x07, 0x0F, 0xC3, 0x9F, 0x1C, 0x40, 0xCC, 0xF2, 0x38, 0xF6, 0x3E, 0x3E, 
0x07, 0x30, 0xF8, 0xFE, 0x7E, 0x77, 0x03, 0x1E, 0x03, 0xC7, 0x03, 0xE0, 0x71, 0xC7, 0x8F, 0x41, 0x0F, 0x07, 0xE0, 0xE1, 0x9C, 0x70, 0x3C, 0x1E, 0x1E, 0x43, 0xE3, 0xB8, 0xDC, 0x78, 0x67, 0x03, 0x0F, 0x87, 0xE0, 0xF0, 0x78, 0x1C, 0xC7, 0x1F, 0x86, 0x1F, 0xC7, 0xE1, 0xC0, 0x7C, 0x1E, 0x1E, 0x33, 0x81, 0xC0, 0xFE, 0x66, 0x71, 0x9F, 0x03, 0xC1, 0xF8, 0x0F, 0x07, 0x00, 0x60, 0x38, 0xF0, 0x3C, 0xC3, 0x39, 0xCF, 0x87, 0xC1, 0xF0, 0x0E, 0x1E, 0x38, 0xF8, 0x0F, 0x00, 0xF8, 0xF0, 0x18, 0xC7, 0x98, 0x3F, 0x87, 0xE1, 0xC0, 0x7F, 0x0D, 0xC7, 0xC1, 0xC0, 0xF0, 0xFC, 0x7C, 0x3F, 0x01, 0xF8, 0xE1, 0xE1, 0xC0, 0xF0, 0x3E, 0x0F, 0x83, 0xC0, 0xE7, 0x1F, 0x83, 0xF8, 0xF8, 0x79, 0xCF, 0xC1, 0xF0, 0xFC, 0x00, 0x18, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x7E, 0x02, 0x00, 0x00, 0x00, 0x03, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xEF, 0xE1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x06, 0x3F, 0xFE, 0xFC, 0x3F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFD, 0xFF, 0xFF, 0xFF, 0xF1, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x02, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x38, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xB8, 0x01, 0x7F, 0xF8, 0x00, 0x00, 0xC0, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xFF, 0x1F, 0x7E, 0x70, 0xE0, 0x3F, 0xE0, 0x40, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xFF, 0xBF, 0x0E, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ), "K": ( 0x00, 0x00, 0x01, 0xE0, 0x00, 0x01, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x03, 0xC0, 0x00, 0x06, 0x1C, 0x80, 0x08, 0x00, 0x00, 0x80, 0xE0, 0x70, 0x00, 0x8C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x40, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00, 0x08, 0x02, 0x04, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88, 0x60, 0x0F, 0xE3, 0x83, 0xF9, 0xC3, 0xF9, 0xE0, 0x0F, 0xFC, 0x00, 0xFF, 0xC0, 0x07, 0xF8, 0x06, 0x3E, 
0x0F, 0x03, 0xF8, 0x60, 0x7E, 0x00, 0xE0, 0x7E, 0x78, 0x07, 0xF7, 0x01, 0xFF, 0xE0, 0x03, 0xFF, 0x00, 0xFF, 0xE0, 0x01, 0xFE, 0x00, 0x1F, 0xFF, 0x00, 0xFF, 0xC0, 0x3F, 0xFC, 0x03, 0x87, 0x40, 0x0F, 0xFE, 0x00, 0xFF, 0x00, 0x03, 0xFB, 0x00, 0xFF, 0x00, 0x07, 0x97, 0xC0, 0x0F, 0xF0, 0x0D, 0xFF, 0x00, 0x0F, 0xB8, 0x01, 0xFE, 0x00, 0x3F, 0xB8, 0x03, 0x3E, 0xC0, 0x3F, 0xF8, 0x06, 0x7F, 0x00, 0xCF, 0x80, 0x03, 0xFE, 0x00, 0x83, 0xE0, 0x00, 0x7C, 0x0F, 0xFF, 0x01, 0xDF, 0xC0, 0x0F, 0xC7, 0xE0, 0x10, 0x1E, 0x07, 0x87, 0xC0, 0xF1, 0xF8, 0x63, 0x1C, 0xE0, 0x00, 0xFC, 0x00, 0x1F, 0x07, 0x07, 0x81, 0xE7, 0xE0, 0xDE, 0x3C, 0x0F, 0x8F, 0x01, 0xFE, 0x7C, 0x3F, 0xBD, 0x83, 0xF1, 0xC0, 0x3D, 0xC4, 0x78, 0x3E, 0x0E, 0x7F, 0x80, 0xFF, 0xE0, 0x03, 0xDC, 0x43, 0x07, 0xFC, 0x03, 0xC0, 0x70, 0xF8, 0x7E, 0x1F, 0x0F, 0x07, 0xC1, 0xF0, 0x78, 0x3E, 0x3F, 0x07, 0xEF, 0x81, 0xC1, 0xF0, 0xF8, 0x0F, 0xFC, 0x07, 0xFE, 0x03, 0xE0, 0x00, 0x38, 0x1F, 0x8F, 0x87, 0xC0, 0xF0, 0x1F, 0x07, 0xC7, 0x81, 0xE0, 0xFE, 0x18, 0x0F, 0xC0, 0x07, 0xFF, 0x80, 0xFF, 0x80, 0x0F, 0xFE, 0x07, 0xCF, 0xC1, 0x00, 0xFC, 0x04, 0x0F, 0x87, 0x80, 0xFF, 0x78, 0x1F, 0xFE, 0x07, 0x7F, 0x03, 0xEF, 0xE0, 0x00, 0xFF, 0x00, 0x1F, 0xE0, 0x1F, 0x8F, 0x0F, 0x03, 0xC1, 0xE0, 0x7C, 0xF8, 0x38, 0x7F, 0x07, 0x1F, 0x8F, 0x86, 0x31, 0xFE, 0x00, 0x0F, 0xF0, 0x01, 0xFC, 0xC0, 0x1F, 0xCF, 0x00, 0x0F, 0xE0, 0x30, 0x7E, 0x3C, 0x1F, 0xFC, 0x01, 0xFF, 0xC0, 0x3F, 0xF8, 0x01, 0xFF, 0x00, 0x03, 0xFC, 0x00, 0x07, 0xC7, 0x80, 0x3F, 0xC0, 0xF0, 0x7C, 0x00, 0x1F, 0xE0, 0xF0, 0x70, 0xF8, 0x3F, 0x8C, 0x07, 0x83, 0xE1, 0xF0, 0xFC, 0x00, 0x1F, 0x80, 0x07, 0xC3, 0x03, 0x73, 0x80, 0x76, 0x78, 0x3C, 0x00, 0xF0, 0x38, 0x38, 0x1F, 0x0E, 0x1C, 0xF0, 0x00, 0xF0, 0xC0, 0x1C, 0x38, 0x01, 0xCE, 0x38, 0xEE, 0x7F, 0xFC, 0xE0, 0x7F, 0x80, 0x0F, 0xF0, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x7F, 0xFF, 0xFC, 0x07, 0xFF, 0x1E, 0x7F, 0xCF, 0x9F, 0x80, 0x00, 0x00, 0x30, 0x0E, 0x00, 0xFF, 0x80, 0xFF, 0xF0, 0x18, 0x00, 0x00, 0x00, 0x01, 0x07, 0x38, 0xFF, 0x00, 0x3F, 0xF0, 0x1B, 0x3F, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFC, 0x7F, 0xFE, 0x07, 0xFF, 0xC3, 0xDF, 0xF8, 0x1C, 0x3F, 0xF8, 0x00, 0xEF, 0x00, 0x1F, 0xF7, 0x0F, 0xFF, 0x01, 0xC0, 0xFE, 0xC0, 0x1F, 0xF8, 0x00, 0x07, 0x80, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x1C, 0x0F, 0x03, 0x8F, 0xC0, 0x00, 0xFE, 0x3E, 0x7F, 0xFF, 0xFF, 0xDF, 0xFF, 0xFF, 0xF8, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x07, 0xFF, 0xC0, 0xE7, 0xFF, 0xF0, 0x3F, 0xF0, 0x04, 0x1F, 0xC0, 0x07, 0xFE, 0x00, 0x10, 0xFC, 0x00, 0x00, 0x30, 0x01, 0xDF, 0x00, 0x1F, 0x01 ), "W": ( 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 
0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x07, 0x80, 0xF0, 0x08, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0xC7, 0xFF, 0xFF, 0xC3, 0xC0, 0x00, 0x00, 0x40, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x0F, 0x8F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xFE, 0x0F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x03, 0x01, 0xFF, 0xFF, 0x9F, 0x80, 0x00, 0x0C, 0x03, 0xE0, 0x01, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xC0, 0x0C, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0xF8, 0x30, 0x00, 0x0F, 0xFF, 0xF8, 0x00, 0x00, 0x01, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x00, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0x00, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xC0 ), "Y": ( 0x00, 0x00, 0x30, 0x00, 0x04, 0x00, 0x06, 0x00, 0xC7, 0x80, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x03, 0x00, 0xC0, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x03, 0xC7, 0xFC, 0x7F, 0x00, 0xFC, 0x07, 0x83, 0xC0, 0x7F, 0xF1, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xFC, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x07, 0xF7, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0x87, 0xF0, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x78, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC3, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF0, 0x1F, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0xC0, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xE1, 0xF8, 0xFF, 0xFF, 0xFF, 0xF8, 0x7F, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x01, 0xFC, 0xFF, 0x9F, 0xFF, 0xFF, 0xFF, 0xE1, 0xFE, 0x00, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0xFE, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x03, 0xF0, 0xFC, 0x3F, 0xFF, 0xFF, 0xFF, 0xC3, 0xF8, 0x01, 0xF8, 0x00, 0x80, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0xF0, 0xFF, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x03, 0xFF, 0xFE, 0x1F, 0xFF, 0xFF, 0xFF, 0xE0, 0xFC, 0x03, 0xE0, 0x00, 0x00, 0x00, 0xF0, 0x1F, 0xF3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x80, 0xFF, 0xFF, 0xFF, 0xFC, 0x3F, 0xEF, 0xFF, 0xFF, 0x81, 0xF0, 0x01, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x01, 0xF8, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xFC, 0x3F, 0xFE, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x80, 0x03, 0x80, 0x00, 0x7C, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF, 0xC0, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0xFF, 0xF1, 0xFF, 0xFF, 0xFF, 0xFE, 0x07, 0xC0, 0x00, 0x0F, 0x00, 0x03, 0x80, 0x00, 0xFC, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x1F, 0xC0, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x80, 0x3F, 0xF0, 0x7F, 0xFE, 0x1F, 0xFC, 0x1F, 0xFF, 0x03, 0xFF, 0x0F, 0xC0, 0x00, 0x1F, 0x00, 0x0F, 0x00, 0x01, 0xFC, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x3F, 0x03, 0xFF, 0xFF, 0xE0, 0xFF, 0x00, 0x1F, 0xC0, 0x07, 0xF8, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x03, 0xE0, 0x00, 0x78, 0x00, 0x3F, 0x83, 0xFF, 0xFF, 0xFF, 0xDF, 0xFF, 0x00, 0x38, 0x00, 0x00, 0x00, 0x03, 0xFC, 0xFF, 0xFF, 0xFF, 0xF0, 0x01, 0xC0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x78, 0x00, 0x1F, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x03, 0xFF, 0xE7, 0xFF, 0xF0, 0x7F, 0x00, 0x7F, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFC, 0x00, 0x1E, 0x01, 0x0F, 0xF0, 0xFF, 0xF0, 0x3F, 0xE0, 0x20, 0x00, 0x03, 0xE0, 0x03, 0xFF, 0xFF, 0xFF, 0xC3, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x7E, 0x01, 0xFF, 0xFF, 0xFF, 0xE1, 0xFF, 0x00, 0x3C, 0x03, 0xFF, 0xFE, 0x01, 0xF8, 0x00, 0x78, 0x1F, 0xFF, 0xC0, 0xFF, 0x01, 0xFF, 0x00, 0xFC, 0x1E, 0x0F, 0xE0, 0xFF, 0xDE, 0xFF, 0x83, 0x80, 0x00, 0x0E, 0x00, 0x03, 0xF0, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x10, 0x0F, 0xF0, 0xF8, 0xFE, 0x07, 0xF0, 0x00, 0x00, 0x18, 0x78, 0x79, 0xFE, 0x0C, 0x3F, 0x06, 0x00, 0x01, 0xF0, 0xF0, 0x7F, 0xFF, 0xE1, 0x81, 0xE1, 0xE0, 0x00, 0x07, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xC0, 0x03, 0xFF, 0xF0, 0xFC, 0x3F, 0x1E, 0x00, 0x00, 0x1C, 0x1F, 0x0F, 0xFF, 0xFE, 0x3C ), "R": ( 0xFF, 0xF8, 0x07, 0x0F, 0xFF, 0xF0, 0x00, 0x00, 0x01, 0xF0, 0xFE, 0x3F, 0x87, 0xF3, 0xFF, 0xFC, 0x00, 0x00, 0x1F, 0xFF, 0x00, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x01, 0xF0, 0x00, 0x07, 0xFF, 0xFE, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0x1F, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xE1, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x3F, 0xFF, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xC0, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0x87, 0xF3, 0xFC, 0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x00, 0x07, 0x80, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0x00, 0x01, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x7E, 0x1F, 0xCF, 0xF1, 0xFC, 0xFF, 0xFF, 0x80, 0x00, 0x01, 0xFF, 0xF0, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x00, 0x7E, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFE, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0xF8, 0x7F, 0xFF, 0xE7, 0xF3, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x68, 
0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xF0, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0xF0, 0x00, 0x07, 0xFF, 0xFF, 0xFC, 0x07, 0xFF, 0xFF, 0xE7, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x30, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x01, 0xFF, 0xFF, 0xBF, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0xF8, 0x7F, 0xFF, 0xC7, 0xFB, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xF8, 0x00, 0x03, 0xFF, 0xFF, 0xC0, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0x3F, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xE1, 0xFE, 0xFF, 0x1F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x1F, 0xFE, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xC0, 0x00, 0x0F, 0xFF, 0xFF, 0x80, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x00, 0x0F, 0x83, 0xF1, 0xFE, 0x3F, 0x9F, 0xFF, 0xE0, 0x00, 0x00, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0x80, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x00, 0x0F, 0x87, 0xFB, 0xFE, 0x7F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0x0F, 0xF7, 0xFC, 0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0xFF, 0xF8, 0x00, 0x1F, 0xFF, 0xFC, 0x00, 0x00, 0x1F, 0x00, 0x00, 0xFF, 0xFF, 0xFC, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0x1F, 0xEF, 0xF8, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x01, 0xFF, 0xF8, 0x00, 0x07, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x0E, 0x00, 0x6F, 0xFF, 0xFF, 0x80, 0x00, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x7F, 0x1F, 0xCF, 0xF1, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xF0, 0x00, 0x7F, 0xFF, 0xF0, 0x00, 0x00, 0x3C, 0x00, 0x01, 0xFF ), "L": ( 0xFE, 0xF0, 0x00, 0x00, 0x00, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0x06, 0x00, 0x00, 0x00, 0x1C, 0x1F, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x02, 0x07, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x03, 0x8F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x01, 0xE1, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x38, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x9F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x1F, 0xFF, 0xFF, 0xFF, 0xFC, 0x80, 0x00, 0x00, 0x03, 0x83, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x70, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07, 0x9F, 0xFF, 0xFF, 0xFF, 0xF8, 0x18, 0x00, 0x00, 0x00, 0x70, 0x7F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x08, 0x1F, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0x07, 0xFF, 0xFF, 0xFF, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x38, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x08, 0x1F, 0xCF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x03, 0xCF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x03, 0xC1, 
0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x38, 0x3F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0F, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x01, 0xC3, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x03, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xF9, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x07, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01, 0x83, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x7D, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x01, 0xC1, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x38, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x38, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xEF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x07, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x01, 0x87, 0xFF, 0xFF, 0xFF, 0xFF, 0xFB, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xEF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0xF1, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x07, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01, 0x83, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFD, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x03, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF7, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x38, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0F, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF3, 0xFF, 0xFF, 0xFF ), "M": ( 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0xF8, 0x0C, 0x00, 0x00, 0x00, 0x3E, 0x07, 0x81, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x1F, 0xFF, 0x07, 0x80, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x78, 0x00, 0x0C, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x01, 0xF0, 0x18, 0x00, 0x00, 0x00, 0x7E, 0x07, 0x81, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x3F, 0xFF, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x03, 0x06, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x01, 0xF0, 0x18, 0x00, 0x00, 0x00, 0x7C, 0x07, 0x83, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x3F, 0xFE, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xE0, 0x60, 0x00, 0x00, 0x00, 0xF8, 0x1E, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFF, 0xFC, 0x3E, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x1C, 0x3E, 0x00, 0x0E, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3F, 0x03, 0x00, 0x00, 0x00, 0x0F, 0x80, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x0F, 0xFF, 0xC3, 0xE0, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xE0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x01, 0xF0, 0x38, 0x00, 0x00, 0x00, 0x7C, 
0x0F, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x7F, 0xFE, 0x0F, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x1F, 0x01, 0x80, 0x00, 0x00, 0x07, 0xC0, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xE1, 0xF0, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xC0, 0xF0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0xF8, 0x0C, 0x00, 0x00, 0x00, 0x3E, 0x07, 0x81, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x1F, 0xFF, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFC, 0x10, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x1F, 0x01, 0x80, 0x00, 0x00, 0x07, 0xC0, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xFF, 0xE1, 0xF0, 0x00, 0x00, 0x01, 0x80, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0xF8, 0x1C, 0x00, 0x00, 0x00, 0x3E, 0x07, 0x81, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x3B, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x07, 0xC0, 0xC0, 0x00, 0x00, 0x01, 0xF0, 0x3C, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x81, 0xFF, 0xF8, 0xFC, 0x02, 0x00, 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3E, 0x07, 0x00, 0x00, 0x00, 0x0F, 0x81, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x0F, 0xFF, 0xC3, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x08, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x01, 0xF0, 0x18, 0x00, 0x00, 0x00, 0x7C, 0x0F, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x7F, 0xFE, 0x1F, 0x00, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xC0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x7E, 0x02, 0x00, 0x00, 0x00, 0x1F, 0x81, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x0F, 0xFF, 0xC3, 0xC0, 0x00, 0x00, 0x02, 0x00 ), "N": ( 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x18, 0x78, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x01, 0xFF, 0xF1, 0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x0F, 0xFF, 0xE7, 0x07, 0xFF, 0xFF, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x3F, 0xFF, 0xE0, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0x1C, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x03, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x38, 0x00, 0x00, 0x07, 0x80, 0x00, 0x00, 0xFF, 0xFE, 0x38, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x07, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x40, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xE1, 0x41, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF, 0xC0, 0x7F, 0xFF, 0xE0, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x1F, 0xFF, 0x1F, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x07, 0xF9, 0xFE, 0x00, 0x00, 0x03, 0xE0, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x07, 0xFF, 0xF9, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x3F, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0x80, 0x00, 0x00, 0x70, 0x00, 0x00, 0x3F, 0xFF, 0xF8, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x07, 0xFF, 0x0F, 0x80, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x3F, 0xFF, 0xFF, 0xC0, 0x1F, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0x80, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x3F, 0xFC, 0x3C, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x7F, 0xFF, 0xFF, 0x80, 0x7F, 0x9F, 0xF0, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x80, 0x7F, 0xF8, 0x7C, 0x01, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x01, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0x80, 0x7F, 0x9F, 0xF0, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x80, 0x7F, 0xF8, 0x7B, 0x43, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x07, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF9, 0xFF, 0xFF, 0xFE, 0x01, 0xFE, 0x3F, 0xC0, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x01, 0xF8, 0x06, 0x01, 0xFF, 0xC1, 0xF8, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x01, 0x80, 0x0F, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xF8, 0x07, 0xF8, 0xFF, 0x00, 0x00 ), "S": ( 0x07, 0x81, 0xF0, 0xE0, 0xF0, 0x78, 0x3E, 0x38, 0xF1, 0x98, 0x78, 0x7E, 0x1F, 0x0F, 0x0E, 0x3C, 0x70, 0x78, 0x3C, 0x1E, 0x00, 0xCE, 0x3C, 0x3F, 0x0F, 0x03, 0xE1, 0xE1, 0x18, 0x0E, 0x01, 0xE0, 0xF0, 0xF8, 0x7C, 0x78, 0xFE, 0x3E, 0x0F, 0x0F, 0x83, 0xE0, 0xE0, 0xE0, 0xF0, 0xF0, 0x7C, 0x3C, 0x3C, 0x3E, 0x3E, 0x1E, 0x0F, 0x07, 0x78, 0xFC, 0x3C, 0x3C, 0x71, 0x87, 0x38, 0x7C, 0x1E, 0x1F, 0x00, 0xF0, 0xF8, 0x7C, 0x63, 0xF1, 0xE0, 0xF8, 0x7E, 0x1E, 0x07, 0x87, 0x83, 0x1E, 0x1E, 0x1F, 0x81, 0xE1, 0xE0, 0x70, 0xF0, 0xF8, 0x78, 0x70, 0xFE, 0x07, 0x0F, 0x00, 0xF0, 0xF8, 0xF8, 0x3C, 0x3C, 0x1F, 0x0F, 0x01, 0xC0, 0xE0, 0x63, 0x83, 0x87, 0x81, 0xE0, 0xF0, 0xF8, 0x3C, 0x70, 0x0E, 0x00, 0x38, 0xC6, 0x1E, 0x43, 0x8C, 0x70, 0x78, 0x3F, 0x0F, 0x87, 0xE1, 0xF8, 0x7C, 0x3B, 0xC3, 0xC0, 0xE1, 0xE1, 0xE0, 0xE1, 0xE1, 0xE0, 0xF8, 0x7C, 0x03, 0x87, 0x18, 0xF0, 0xE0, 0x61, 0xC1, 0xE7, 0x3C, 0x38, 0xC0, 0xE0, 0x3C, 0x0F, 0x38, 0xFE, 0x1F, 0x83, 0xE1, 0xE0, 0x78, 0xF0, 0x01, 0xC3, 0xC1, 0xE1, 0xC0, 0xF0, 0x78, 0x3E, 0x1F, 0x00, 0xF0, 0xF0, 0x78, 0x78, 0x1E, 0x39, 0xC3, 0xC1, 0x3E, 0x1E, 0x0F, 0x1D, 0xC3, 0xC1, 0xE1, 0xC0, 0x1F, 0x1F, 0xC3, 0xC3, 0xE0, 0xF8, 0x3E, 0x1F, 0x07, 0x87, 0xC0, 0xC7, 0x07, 0x07, 0xC0, 0xF8, 0xF0, 0xF0, 0x07, 0x07, 0x80, 0xF8, 0x7E, 0x1E, 0x3E, 0x17, 0x87, 0x0F, 0x03, 0xC3, 0xE3, 0xE0, 0xFF, 0x07, 0x80, 0x38, 0x0E, 0x03, 0xC1, 0xC1, 0xE1, 0xF0, 0xF0, 0x47, 0xC0, 0xF8, 0x7C, 0x3E, 0x1E, 0x19, 0x87, 0x80, 0xF0, 0x7C, 0x1E, 0x0E, 0xF0, 0xE0, 0xF0, 0xC7, 0x07, 0xE0, 0x7C, 0x3C, 0x3F, 0x03, 0xC3, 0xC1, 0xE0, 0xF8, 0x7F, 0x0F, 0x18, 0xE0, 
0xE0, 0xF0, 0x78, 0x3F, 0x0F, 0x80, 0xF0, 0xF8, 0x0F, 0x0F, 0x1F, 0x07, 0x83, 0xE0, 0xF1, 0x9C, 0x07, 0x03, 0x1C, 0x70, 0xC7, 0x03, 0x87, 0xC1, 0xF0, 0xF0, 0xFC, 0x3E, 0x0F, 0x87, 0x83, 0xC0, 0xF8, 0x78, 0x79, 0xC7, 0x3C, 0x70, 0x7E, 0x1C, 0x71, 0x87, 0x1E, 0x1E, 0x3E, 0x1F, 0x03, 0x86, 0x1E, 0x1F, 0x01, 0xC3, 0xC3, 0xE0, 0xF0, 0xF0, 0xF8, 0x3C, 0x1F, 0x07, 0x1E, 0x38, 0xF8, 0x70, 0x3E, 0x3F, 0x83, 0xC0, 0x78, 0x71, 0xE0, 0xF8, 0x0F, 0x0F, 0x00, 0xF0, 0x1E, 0x1F, 0x0F, 0x1F, 0x0F, 0xC1, 0xF1, 0xF8, 0xE1, 0xE0, 0xE3, 0x0E, 0x1F, 0xC3, 0x0F, 0x07, 0x1C, 0x70, 0xF8, 0x78, 0x7C, 0x3E, 0x3C, 0x1F, 0xC3, 0xC0, 0xF8, 0x30, 0x78, 0x3F, 0x1F, 0x01, 0xF0, 0x7C, 0x3F, 0x1E, 0x3F, 0x1C, 0x1F, 0x0F, 0x07, 0x83, 0xC0, 0xF0, 0x78, 0x3E, 0x1E, 0x78, 0x71, 0x87, 0x0F, 0x7E, 0x03, 0xC3, 0x70, 0xE3, 0x07, 0x01, 0xE1, 0xE3, 0x98, 0xF8, 0x77, 0x01, 0x0C, 0x1C, 0x1C, 0x27, 0x0F, 0x01, 0xC1, 0xC3, 0x80, 0xC1, 0xF0, 0xF0, 0x7E, 0x1F, 0x0F, 0x07, 0xC3, 0xF0, 0x78, 0x0F, 0x38, 0xE1, 0xC1, 0xE1, 0xF0, 0x3E, 0x0F, 0x83, 0xC3, 0x87, 0x80, 0xF0, 0xF0, 0x1F, 0x01, 0xE1, 0xF0, 0xC7, 0x0F, 0x11, 0xC3, 0xC0, 0xF8, 0x7C, 0x3C, 0x3E, 0x0F, 0x0E, 0x70, 0x8E, 0x3D, 0x87, 0xC7, 0x18, 0xF0, 0x7C, 0x3E, 0x1F, 0x07, 0x8C, 0xF0, 0x3F, 0x8F, 0x0E, 0x1B, 0xC3, 0x87, 0x07, 0x83, 0xE0, 0xF0, 0x78, 0xC7, 0x18, 0xE3, 0xC3, 0xE1, 0xE0, 0x7C, 0x3C, 0x71, 0x9C, 0x70, 0x78, 0xF0, 0xC6, 0x3E, 0x1F, 0x0F, 0x80, 0xE0, 0x1C, 0x3E, 0x3C, 0x87, 0x0E, 0x71, 0x87, 0x83, 0xC2, 0x3C, 0x3E, 0x1E, 0x18, 0xC7, 0x83, 0xC3, 0x80, 0xF0, 0xF1, 0x8E, 0x19, 0xC1, 0xC0, 0x78, 0x7C, 0x3C, 0x0F, 0x87, 0x73, 0x8F, 0x0F, 0x0F, 0x0E, 0x3E, 0x0F, 0x0C, 0xF8, 0x78, 0x78, 0xC0, 0xF0, 0xE1, 0xC0, 0xF0, 0xF0, 0x1E, 0x1E, 0x10, 0xF0, 0xFF, 0x83, 0xF1, 0xE3, 0xC0, 0x3C, 0x78, 0xE0, 0xF0, 0xFC, 0x3E, 0x1E, 0x18, 0xF0, 0x3E, 0x1F, 0x07, 0x81, 0xE1, 0xE3, 0x1C, 0x38, 0x87, 0x83, 0xC1, 0xCE, 0x1E, 0x0F, 0x1F, 0x0F, 0x83, 0xC1, 0xC3, 0x8F, 0x1E, 0x07 ), "V": ( 0xFF, 0xFF, 0xFF, 0x39, 0xE7, 0xF9, 0xE6, 0x00, 0x00, 0x00, 0x07, 0xF0, 0x06, 0x07, 0x73, 0xBD, 0xFF, 0xCF, 0xFF, 0xE3, 0x9C, 0x00, 0x03, 0xE7, 0x7F, 0xCF, 0x3F, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x07, 0xF3, 0xF8, 0xE7, 0x80, 0x00, 0x00, 0x01, 0xF0, 0x00, 0x00, 0x87, 0xE3, 0xFF, 0xFF, 0xFF, 0xFF, 0x87, 0x3C, 0x43, 0x9F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0x1E, 0xFF, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x31, 0x0F, 0x3E, 0x0F, 0xFF, 0xFE, 0xFF, 0x9E, 0x43, 0x9C, 0x39, 0xE7, 0xFC, 0xCF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF3, 0xFF, 0x7F, 0x9E, 0x70, 0x00, 0x00, 0x00, 0x3F, 0x82, 0x1C, 0x71, 0x00, 0xF3, 0xFF, 0xFF, 0xF0, 0xE3, 0xDC, 0x73, 0x87, 0x1F, 0xE7, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xC1, 0xF8, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x8C, 0x01, 0x80, 0x40, 0x87, 0xF3, 0x7F, 0xE7, 0x1F, 0xFF, 0x83, 0x1C, 0xFF, 0x8E, 0xFF, 0xCF, 0xBF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x83, 0xFF, 0xCF, 0x20, 0x00, 0x00, 0x00, 0x03, 0x30, 0x03, 0x0C, 0xE7, 0x1F, 0xFF, 0xBF, 0xDE, 0x7B, 0xC7, 0x39, 0xDF, 0x7F, 0xCF, 0xFF, 0x9F, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xEE, 0x1F, 0xFF, 0x00, 0x0C, 0x00, 0x00, 0x00, 0xE3, 0x00, 0x60, 0x00, 0x7F, 0x7F, 0xFF, 0xFE, 0xE0, 0x7C, 0x78, 0x61, 0xFB, 0xFF, 0x8F, 0x7F, 0xFF, 0xC0, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0x88, 0x7F, 0xFE, 0xEC, 0x00, 0x00, 0x00, 0x70, 0x03, 0x30, 0x80, 0x07, 0xDE, 0xFF, 0xF8, 0x7F, 0xF0, 0xEC, 0x09, 0xFE, 0xF1, 0xDF, 0x79, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFB, 0xE4, 0x30, 0xC0, 0x1C, 0x40, 0x3B, 0x00, 0x20, 0x00, 0x47, 0xE7, 0xFF, 0x9C, 0xF7, 0xE0, 0xC3, 0x78, 0xF7, 0xCE, 0x73, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0xFF, 0x80, 0x61, 0x00, 0x1C, 0x00, 0x3C, 0x21, 0x80, 0x18, 0xF3, 0xF3, 0xFF, 0xE7, 0xFC, 0x67, 0x1F, 0x80, 0x01, 0xFC, 0x3F, 0xFF, 0x8F, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE7, 0xFD, 0xDF, 0xFF, 0x80, 0x42, 0x00, 0x18, 0x06, 0xE0, 0x07, 0x81, 0x18, 0xE7, 0xFF, 0xEF, 0x79, 0x9C, 0x39, 0xE6, 0x30, 0xFF, 0x1C, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF3, 0x8F, 0xFC, 0x00, 0x60, 0x00, 0x70, 0x0E, 0xF0, 0x00, 0x00, 0x19, 0xEF, 0xBD, 0xE7, 0xFF, 0xC1, 0x1F, 0x81, 0x87, 0xFF, 0x81, 0xF0, 0x0F, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x7E, 0xF8, 0x00, 0x60, 0x00, 0x30, 0xC0, 0x00, 0xC0, 0x47, 0x3E, 0xF3, 0x3F, 0xF7, 0xCF, 0x0C, 0xE1, 0xEF, 0x31, 0xC3, 0x8E, 0x78, 0xFF, 0xBC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF7, 0xFC, 0xE7, 0xFF, 0xE0, 0x01, 0x84, 0x30, 0x00, 0x79, 0x90, 0x06, 0x00, 0x3B, 0xFF, 0xFE, 0x3F, 0xCE, 0x31, 0x8E, 0x70, 0x0F, 0xFD, 0xCF, 0xF8, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x9E, 0x7F ), "F": ( 0x39, 0xCC, 0xF1, 0xC1, 0x9B, 0x83, 0x18, 0xC7, 0x1C, 0x79, 0x18, 0xF1, 0x81, 0xF8, 0x80, 0xE6, 0x86, 0x67, 0x18, 0xE4, 0x8F, 0xEC, 0x0E, 0x73, 0xB8, 0x8E, 0x32, 0x31, 0x81, 0xCC, 0x7C, 0x7E, 0x30, 0x37, 0x78, 0x03, 0x9C, 0xCB, 0x0C, 0xC1, 0xCE, 0x73, 0x73, 0xE2, 0x67, 0x63, 0x70, 0x8E, 0x70, 0x1C, 0x3F, 0xE0, 0xC7, 0x0C, 0x71, 0xC3, 0x0F, 0x8E, 0x71, 0xC6, 0x01, 0xC6, 0x98, 0xC6, 0x3E, 0x6E, 0x70, 0x63, 0x66, 0x73, 0x66, 0xF2, 0x6E, 0x71, 0xCC, 0xCE, 0xE7, 0x73, 0x07, 0x39, 0x19, 0x22, 0x83, 0x03, 0xF1, 0x81, 0xE1, 0xE0, 0xE3, 0x83, 0xF9, 0x8F, 0x99, 0x1C, 0x30, 0x1C, 0xF0, 0x63, 0x31, 0xC6, 0x63, 0x7D, 0x03, 0x03, 0x67, 0x31, 0x00, 0x0E, 0x73, 0xE0, 0xE1, 0xF6, 0xE3, 0x00, 0xE8, 0xF8, 0xF9, 0x9C, 0x9F, 0x19, 0x8D, 0x8E, 0xC7, 0x38, 0x39, 0x83, 0x83, 0x81, 0xF8, 0x38, 0xE2, 0x0F, 0x31, 0xF0, 0x03, 0xCF, 0x03, 0xC0, 0x27, 0x63, 0xC3, 0x63, 0x86, 0x7C, 0x7C, 0x79, 0xC3, 0x31, 0xF3, 0x33, 0x8F, 0x0F, 0x87, 0x38, 0x71, 0x9C, 0xC1, 0xCE, 0xE6, 0x7C, 0x60, 0x3E, 0x11, 0xF1, 0x8C, 0xE6, 0x1F, 0x18, 0xCC, 0xEC, 0x79, 0x3E, 0x2F, 0x1C, 0x39, 0x19, 0xE6, 0x03, 0x61, 0x8F, 0x04, 0x1F, 0x61, 0xC3, 0x33, 0xFB, 0x3C, 0x66, 0x47, 0xCC, 0xE4, 0xC0, 0x07, 0x9F, 0x00, 0xE1, 0x38, 0xFF, 0x3D, 0x81, 0x8E, 0xC0, 0xF8, 0x38, 0xC7, 0x39, 0x83, 0x1C, 0xF3, 0x33, 0x3B, 0x36, 0x3F, 0xC6, 0xE7, 0x06, 0xE6, 0x67, 0x70, 0xC1, 0xCC, 0x7C, 0xC4, 0x64, 0xC3, 0x11, 0xB9, 0x38, 0x63, 0x73, 0xE3, 0xE6, 0x70, 0x7E, 0x60, 0xBC, 0xE3, 0xB0, 0x63, 0x36, 0x73, 0x26, 0x78, 0xCC, 0xE0, 0x9C, 0xF8, 0xDC, 0xCC, 0x9C, 0xC9, 0x9C, 0x19, 0x1F, 0x3C, 0x01, 0x8F, 0xC7, 0x80, 0xD3, 0x18, 0x19, 0xF1, 0x10, 0xE2, 0xF0, 0xF8, 0xC9, 0x99, 0x99, 0x98, 0xF1, 0xBC, 0x33, 0x07, 0x03, 0x61, 0xF0, 0x62, 0x38, 0xE1, 0x98, 0xC1, 0x98, 0xEF, 0x9C, 0xC7, 0x1C, 0xC1, 0x9C, 0xF3, 
0x31, 0xB9, 0xE3, 0x98, 0x63, 0x72, 0x33, 0x07, 0x03, 0x8E, 0x1F, 0x0C, 0x46, 0x37, 0x3C, 0xE0, 0xE4, 0xF6, 0x60, 0xC6, 0x38, 0xDC, 0x80, 0xDD, 0x8C, 0xC3, 0x07, 0x18, 0xC6, 0xE1, 0xC7, 0x61, 0x86, 0x70, 0xE6, 0x30, 0x67, 0x3C, 0x76, 0x66, 0x78, 0x3F, 0x0F, 0x06, 0xCF, 0xC8, 0xCC, 0x0E, 0x39, 0xC8, 0x98, 0x0E, 0x76, 0xCE, 0x19, 0xCC, 0x8C, 0xC7, 0x8C, 0xD8, 0xC3, 0x30, 0xCE, 0x78, 0x38, 0xF0, 0xE9, 0x07, 0x1D, 0x19, 0x98, 0x38, 0x72, 0x33, 0x33, 0x91, 0x04, 0xE3, 0x23, 0x9C, 0xE3, 0x61, 0xCC, 0xDC, 0xCC, 0xCC, 0x9C, 0xD8, 0x79, 0xCC, 0x01, 0xCF, 0x11, 0x8E, 0x0F, 0x87, 0x20, 0x63, 0x3B, 0x38, 0x1C, 0x11, 0xF9, 0x39, 0xC3, 0x07, 0x1F, 0x01, 0x80, 0x5D, 0x84, 0xE5, 0x84, 0x71, 0xC6, 0x3C, 0x07, 0x0C, 0xC4, 0xFE, 0xC7, 0x8F, 0x1D, 0xCC, 0xC7, 0xEE, 0x63, 0x3C, 0x66, 0xE4, 0x60, 0xC7, 0x03, 0x1C, 0x8C, 0xEC, 0x64, 0x4C, 0xC6, 0x66, 0xE2, 0xCE, 0xD1, 0x0E, 0xC0, 0x3C, 0xDC, 0xEE, 0x7C, 0xCC, 0xF3, 0x71, 0x9D, 0x0C, 0x8F, 0x07, 0x9C, 0x18, 0xFC, 0x83, 0x18, 0x39, 0x98, 0x71, 0xFE, 0x63, 0xC7, 0x66, 0xC6, 0x46, 0xE6, 0x46, 0xCC, 0xC6, 0xE4, 0xC0, 0xFC, 0xF1, 0x30, 0x3B, 0x33, 0x18, 0x33, 0x1E, 0x33, 0x0E, 0x3F, 0x0C, 0x19, 0x37, 0x01, 0xB6, 0xE0, 0xF9, 0x70, 0x67, 0x73, 0x86, 0x3B, 0x00, 0x1E, 0x0E, 0x0E, 0xC7, 0xC6, 0x71, 0xC4, 0xC4, 0xE6, 0x60, 0x66, 0x70, 0xCE, 0xCE, 0x1F, 0x9D, 0x8C, 0xCF, 0x0C, 0x78, 0xDC, 0x31, 0xC7, 0x11, 0x0E, 0x38, 0x3E, 0x0C, 0x8E, 0x0F, 0x1F, 0x1C, 0x18, 0xCE, 0x79, 0xC7, 0x31, 0x88, 0x78, 0x70, 0x9C, 0xCE, 0x77, 0x6C, 0xC6, 0x6E, 0xCE, 0x03, 0x39, 0xCC, 0xE8, 0xE3, 0x88, 0xE7, 0xC0, 0xCD, 0x87, 0x99, 0x1C, 0xFC, 0x13, 0x18, 0x99, 0x9C, 0xF9, 0xB8, 0x98, 0x31, 0xB9, 0xC7, 0xC1, 0x83, 0x81, 0x8E, 0x67, 0xC1, 0x1C, 0xC6, 0x70, 0x19, 0x9C, 0x67, 0x1C, 0x60 ), "H": ( 0x7F, 0xFE, 0x7C, 0x00, 0xFC, 0x7F, 0xF0, 0x08, 0x1E, 0x0F, 0xFF, 0x07, 0x80, 0x1F, 0xF8, 0x3F, 0x00, 0x30, 0x1F, 0x78, 0x38, 0x0F, 0x80, 0xFE, 0x00, 0x06, 0x07, 0xFF, 0x0F, 0xC0, 0x3F, 0x9F, 0xC0, 0x01, 0xEF, 0x83, 0xE0, 0x1F, 0x0F, 0xC1, 0xF0, 0x40, 0x1F, 0xC3, 0xFE, 0x03, 0xFF, 0xF8, 0xFE, 0x03, 0x00, 0xFF, 0x07, 0xC0, 0x00, 0x7F, 0xC0, 0xE0, 0x7E, 0x00, 0x01, 0xFF, 0x86, 0x01, 0xFC, 0x0F, 0xF2, 0x03, 0xC0, 0x07, 0xFF, 0xFF, 0x00, 0xC0, 0x1F, 0xF9, 0x98, 0x00, 0x0C, 0x7F, 0xFF, 0xC0, 0x1E, 0x3F, 0xFC, 0x00, 0x0E, 0x7C, 0x1F, 0x01, 0xFE, 0x00, 0xFF, 0xC0, 0x7E, 0x03, 0xF8, 0x00, 0x70, 0xFC, 0xF8, 0x00, 0x1F, 0x80, 0xF0, 0x0C, 0x07, 0xFD, 0xF0, 0x38, 0x38, 0xFF, 0x3F, 0x80, 0xFF, 0x80, 0xFF, 0x00, 0xFF, 0x01, 0xF8, 0x07, 0xF0, 0x3E, 0x00, 0x03, 0xE0, 0x1F, 0x80, 0x07, 0xFC, 0x07, 0xE0, 0xE7, 0xF8, 0x70, 0x78, 0x00, 0xFE, 0x03, 0xF0, 0x07, 0xF8, 0x0F, 0x00, 0x7F, 0xE1, 0x87, 0xFF, 0xF8, 0x01, 0xFF, 0x01, 0xF0, 0x07, 0xF0, 0x0F, 0xF0, 0x00, 0x00, 0x7F, 0xCF, 0x80, 0x00, 0xFF, 0xC0, 0x7C, 0x00, 0x7F, 0x80, 0xFF, 0x00, 0x3E, 0x00, 0xFF, 0x00, 0xFE, 0x00, 0x7E, 0x3F, 0x0E, 0x00, 0x7F, 0x07, 0xFC, 0xFF, 0x00, 0x03, 0xFE, 0x3E, 0x06, 0x00, 0x3F, 0x00, 0x7C, 0x00, 0x23, 0xC0, 0x38, 0x0F, 0xBC, 0x1E, 0x00, 0x7F, 0x98, 0x7E, 0x0F, 0x78, 0x1F, 0xF8, 0x00, 0x08, 0xFE, 0x00, 0x00, 0x03, 0xFC, 0x07, 0xC0, 0x00, 0x7E, 0x07, 0xF0, 0x1E, 0x03, 0xF0, 0x00, 0x0F, 0xE0, 0x3C, 0x1F, 0xC7, 0xE0, 0x3C, 0x07, 0x98, 0x7F, 0x0F, 0x80, 0xE1, 0xFF, 0xCE, 0x00, 0x8F, 0xE7, 0xC0, 0x03, 0xC7, 0xC3, 0xC3, 0xC1, 0xC1, 0xEF, 0xFF, 0x80, 0x01, 0xFF, 0x87, 0xC1, 0x80, 0x0F, 0xC7, 0x81, 0xF8, 0x07, 0xE0, 0xFF, 0xC0, 0x01, 0xC0, 0xFE, 0x00, 0x00, 0x78, 0xF8, 0x0F, 0x80, 0x78, 0x01, 0xFC, 0x00, 0xF8, 0x01, 0xF1, 0xFB, 0xF0, 0x07, 0xF8, 0x03, 0xFF, 0x83, 0x83, 0xFF, 0xFF, 0x80, 0x3F, 0x1F, 0xC0, 0x00, 0x31, 0xFC, 0x1E, 0x00, 0x1F, 0x80, 0x1F, 0x00, 
0x3F, 0x00, 0xFC, 0x07, 0xFF, 0x80, 0x3E, 0x07, 0xC0, 0xE0, 0x3C, 0x78, 0x38, 0xF8, 0x7C, 0x1F, 0xC0, 0x1E, 0x0F, 0x1F, 0x80, 0x1F, 0x1E, 0x00, 0x1F, 0x1E, 0x00, 0x0E, 0x00, 0x00, 0x3F, 0xC0, 0x00, 0x07, 0x0F, 0xC1, 0xFC, 0x03, 0xFF, 0xFF, 0xE0, 0x1F, 0x0F, 0xFF, 0xFE, 0x00, 0x03, 0xF8, 0x7F, 0xF0, 0x01, 0xF8, 0x1F, 0x80, 0xF0, 0x7F, 0x83, 0x81, 0x00, 0x01, 0xE0, 0xE0, 0x07, 0xC1, 0xFC, 0x00, 0x7F, 0xC0, 0x7F, 0x80, 0x1F, 0x81, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x02, 0x07, 0xE0, 0x0F, 0x00, 0x03, 0xF8, 0x0F, 0x83, 0x0E, 0x02, 0x0F, 0xC0, 0x7F, 0x0F, 0x03, 0xC0, 0x7E, 0x06, 0x07, 0xFF, 0x1F, 0x00, 0x1F, 0xCF, 0x1F, 0x03, 0xFC, 0x0F, 0xFF, 0x1F, 0x00, 0x0F, 0xC7, 0xFC, 0x00, 0x3F, 0xE0, 0x7F, 0x80, 0x60, 0x00, 0x7F, 0x03, 0xE1, 0x03, 0xC1, 0x83, 0xE0, 0x03, 0x00, 0xFC, 0x1F, 0xC0, 0x00, 0xFC, 0x0F, 0xC1, 0xF2, 0x03, 0xE1, 0x07, 0xC0, 0x0F, 0x80, 0x0F, 0x03, 0x87, 0x00, 0x00, 0x0F, 0x81, 0xFC, 0x07, 0xFF, 0xF0, 0x0E, 0x07, 0xFF, 0x87, 0x8F, 0x87, 0xFF, 0xCF, 0xC7, 0xE1, 0xFF, 0xFF, 0x01, 0xF0, 0xFF, 0x80, 0x01, 0x80, 0xE3, 0x00, 0xE0, 0x00, 0x00, 0x00, 0xE0, 0x0F, 0xF8, 0x00, 0xFE, 0x0F, 0x80, 0x3E, 0x0F, 0xFF, 0xC0, 0xF8, 0x03, 0xFE, 0x3F, 0x1F, 0x01, 0xFC, 0x00, 0x3F, 0x00, 0xFF, 0x00, 0x06, 0x03, 0xFC, 0x01, 0xFC, 0x07, 0xF0, 0x0F, 0xE0, 0x3E, 0x00, 0x3F, 0x80, 0xFE, 0x01, 0xF8, 0x00, 0xF8, 0x0F, 0xE0, 0x30, 0xF1, 0xF1, 0xF8, 0x00, 0xF0, 0x7F, 0xF0, 0x3F, 0x80, 0x7E, 0x07, 0xE0, 0x00, 0xFE, 0x03, 0xE0, 0x0F, 0xFE, 0x01, 0xF8, 0x0F, 0xF0, 0x74, 0x00, 0x18, 0x03, 0xF8, 0x00, 0xE0, 0x1F, 0xC0, 0x00, 0xC0, 0x7E, 0x03, 0xFC, 0x1C, 0x00, 0x3F, 0x78, 0x3E, 0x00, 0x7C, 0x3F, 0x00 ), "Z": ( 0xFC, 0x7E, 0x0F, 0x83, 0xF8, 0x3B, 0xC1, 0xF0, 0x7C, 0x1F, 0x80, 0xF0, 0x8F, 0x83, 0xE1, 0xC7, 0xE0, 0xF0, 0x7C, 0x07, 0x0F, 0x0F, 0x81, 0xC0, 0x63, 0xC0, 0x7C, 0x07, 0x83, 0xC0, 0x3E, 0x03, 0x80, 0x00, 0x00, 0x03, 0x00, 0x00, 0x7E, 0x1F, 0xF0, 0xF8, 0xF8, 0x7E, 0x3B, 0xE1, 0xF8, 0x07, 0x83, 0xC0, 0x1C, 0x00, 0xC3, 0xC1, 0xC0, 0x7C, 0xFC, 0x7C, 0x7F, 0x1D, 0x9E, 0x1E, 0x3E, 0x1E, 0x1F, 0x1E, 0x30, 0x3E, 0x1F, 0x07, 0x00, 0xF0, 0x1F, 0x00, 0x00, 0x38, 0x07, 0x00, 0x40, 0xFE, 0x1F, 0x8F, 0xFF, 0xF8, 0xF1, 0xF0, 0xF0, 0xF8, 0x61, 0xE0, 0xF8, 0x7C, 0x1F, 0x03, 0xF0, 0x7E, 0x07, 0x81, 0xE6, 0x1E, 0x0F, 0x87, 0xE0, 0xF0, 0x87, 0xC0, 0xE0, 0x78, 0x3E, 0x1F, 0x03, 0x81, 0xC0, 0x3C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0x8F, 0xE0, 0x7F, 0x8F, 0xE3, 0xCF, 0x8F, 0xC1, 0xE1, 0xC1, 0x87, 0x03, 0x81, 0xE0, 0x7C, 0x1F, 0xC1, 0xF0, 0x3C, 0x3E, 0x3F, 0x3C, 0xE0, 0x0F, 0x07, 0x87, 0xC3, 0xF8, 0x78, 0x1E, 0x78, 0xC0, 0x10, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x9F, 0x9F, 0xF1, 0xFE, 0xFC, 0xFF, 0x03, 0xC7, 0x83, 0xC3, 0xC0, 0x3E, 0x00, 0x30, 0x18, 0x7C, 0x1F, 0xC3, 0xE1, 0xF8, 0xE7, 0x18, 0xE1, 0xE0, 0x3F, 0x0F, 0x83, 0xC0, 0x73, 0x87, 0xE0, 0x7C, 0x00, 0x00, 0x70, 0x1C, 0x00, 0x00, 0xC0, 0x1C, 0x00, 0xF8, 0xFC, 0xFF, 0x3F, 0xFF, 0xE3, 0xC0, 0xFC, 0x3E, 0x03, 0x00, 0x03, 0xC1, 0xC0, 0x38, 0x3E, 0x0F, 0x0F, 0x1E, 0x1F, 0x07, 0xF0, 0xFE, 0x1F, 0xC0, 0x78, 0x7F, 0x07, 0xC1, 0xF0, 0x7E, 0x0F, 0x81, 0xE0, 0x78, 0x0E, 0x00, 0x60, 0x00, 0x00, 0x00, 0x7D, 0x03, 0xFF, 0xFF, 0xFF, 0x0F, 0xE0, 0x3F, 0x81, 0xF8, 0x1C, 0x0F, 0x02, 0x78, 0x3F, 0x03, 0xE0, 0xFC, 0x1F, 0x83, 0xC1, 0xE1, 0xFC, 0xE3, 0xC1, 0xF0, 0x3E, 0x1F, 0x03, 0xC0, 0xCF, 0x07, 0xC0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x3F, 0x01, 0xF0, 0x7F, 0xF0, 0xFF, 0x1F, 0x0F, 0x8F, 0x07, 0x81, 0xC0, 0x0F, 0x03, 0xB0, 0xE0, 0x3C, 0x73, 0x81, 0xC7, 0x78, 0x7F, 0x83, 0xC1, 0xF0, 0xF8, 0xF8, 0x3C, 0x0F, 0x01, 0xE0, 0xF8, 0x70, 0x03, 0x00, 0x38, 0x00, 0x01, 0xC0, 
0xF0, 0x00, 0x07, 0xC3, 0xFF, 0xFF, 0xF8, 0xFE, 0x1E, 0x1F, 0x83, 0x87, 0x07, 0x06, 0x00, 0x0F, 0x0F, 0x81, 0xE0, 0xFE, 0x0F, 0xE1, 0xE4, 0x3E, 0x1F, 0x18, 0xF0, 0x1F, 0x87, 0x81, 0xE3, 0x81, 0xE1, 0xF0, 0xF0, 0x0F, 0x81, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x81, 0xF8, 0xFF, 0xFF, 0xC0, 0xF8, 0xC7, 0xC1, 0x8F, 0x0C, 0x60, 0x1E, 0x1F, 0x07, 0x10, 0x1E, 0x1E, 0x1F, 0x1F, 0xC7, 0xFF, 0xC1, 0xF0, 0x3E, 0x03, 0xC3, 0xE0, 0xF0, 0x78, 0xE1, 0xF8, 0x7C, 0x07, 0xC0, 0xF0, 0x00, 0x03, 0x00, 0x00, 0x01, 0xFF, 0xC3, 0xF3, 0xFF, 0xF9, 0xF8, 0x7F, 0x1F, 0xC0, 0xF0, 0x3E, 0x0F, 0x81, 0xE0, 0xF0, 0x78, 0x1C, 0xF0, 0x3E, 0x0F, 0x07, 0xC3, 0x81, 0xE3, 0xC0, 0x7E, 0x0F, 0x83, 0xC7, 0x1E, 0x1F, 0x00, 0xF0, 0x3E, 0x07, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x7F, 0x0F, 0xF3, 0xFF, 0xFC, 0x0F, 0x1E, 0x7E, 0x3C, 0x07, 0xE0, 0xE1, 0xE0, 0x38, 0xC0, 0xDC, 0x1E, 0x1E, 0x3F, 0xC1, 0xCE, 0x3E, 0x03, 0xF0, 0xCF, 0x07, 0xC3, 0xC1, 0xF0, 0xF0, 0x43, 0xC1, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7E, 0xCF, 0xF1, 0xFF, 0xFF, 0xF0, 0xFC, 0x3F, 0xE1, 0xE0, 0x30, 0x40, 0xE0, 0xFC, 0x1E, 0x0F, 0x0F, 0x07, 0x87, 0xE1, 0xFC, 0x78, 0x4F, 0x03, 0x83, 0xC3, 0xC0, 0xF0, 0x1F, 0x07, 0x80, 0xFC, 0x02, 0x03, 0x80, 0x1C, 0x00, 0x00, 0x00, 0x20, 0x00, 0x3F, 0x0F, 0xE7, 0xE3, 0xF7, 0xF0, 0xFC, 0x1F, 0xC7, 0x1C, 0x00, 0xE0, 0x3C, 0x3F, 0x0F, 0x1F, 0x0E, 0x21, 0xF0, 0xF8, 0xF0, 0xFC, 0x3E, 0x1F, 0x87, 0x8E, 0x71, 0x1E, 0x03, 0xF0, 0xFC, 0xF1, 0x80, 0x38, 0x00, 0x60, 0x00, 0x00, 0x00, 0x80, 0x00, 0xFF, 0x1F, 0xC7, 0xEF ), "AW": ( 0x00, 0x01, 0xFF, 0xFE, 0x00, 0x0E, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x3F, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0xE0, 0x00, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x8F, 0xFF, 0xFE, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xFE, 0x00, 0x00, 0x1F, 0xFF, 0xF8, 0x00, 0x04, 0xFF, 0xFF, 0xC0, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF8, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xC0, 0x00, 0x01, 0x8F, 0xFF, 0x00, 0x00, 0xCF, 0xFF, 0xFE, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xE0, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x7F, 0xFF, 0x00, 0x00, 0x1D, 0xFF, 0xFC, 0x00, 0x00, 0x7F, 0xFF, 0xF0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFB, 0xFC, 0x1F, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xEF, 0x80, 0x00, 0x07, 0xFF, 0xFF, 0x80, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFE, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x07, 0xFF, 0xC0, 0x00, 0x00, 0xC7, 0xFF, 0x80, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x1F, 0xFF, 0xBF, 0xC0, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x03, 0xFF, 0xF8, 0x00, 0x00, 0x7F, 0xFF, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xF7, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x81, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x21, 0xFF, 0xE0, 0x00, 0x03, 
0xFF, 0xFF, 0xC0, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFE, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xF0, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF8, 0x3F, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xC0, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xC1, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFE, 0x00, 0x00, 0x1F, 0xFF, 0xFC, 0x00, 0x00, 0x3F, 0xFF, 0xF8, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFB, 0xF8, 0x3F, 0xFF, 0xFE, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x1F, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0x83, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x7F, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xFC, 0x00, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x07, 0xFF, 0xEF, 0xE0, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x07, 0xFF, 0xFF ), "AH": ( 0x00, 0x00, 0xFF, 0xFE, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x03, 0xC7, 0xFF, 0x00, 0x03, 0xF3, 0xFF, 0xFF, 0xFF, 0x00, 0x1F, 0x07, 0xFF, 0xF0, 0x0F, 0x80, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x03, 0xF0, 0x1F, 0xE0, 0x1F, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x00, 0x00, 0x78, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x0E, 0x1F, 0xF8, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x3E, 0x0F, 0xFF, 0xC0, 0x1F, 0x01, 0xF8, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x07, 0xE0, 0x7F, 0xC0, 0x7F, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0xC0, 0x20, 0x03, 0xF0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x30, 0x1F, 0xE0, 0x00, 0x7F, 0xC7, 0xFF, 0xFF, 0xFC, 0x00, 0x7C, 0x1F, 0xFF, 0x80, 0x3E, 0x03, 0xF0, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x0F, 0xC0, 0xFF, 0xC0, 0x7F, 0xF0, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x00, 0x03, 0xE0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x1C, 0x1F, 0xF8, 0x00, 0x7F, 0xF7, 0xFF, 0xFF, 0xF8, 0x00, 0xF8, 0x7F, 0xFF, 0x00, 0x7C, 0x07, 0xE0, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x1F, 0x80, 0x7F, 0x00, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0xFF, 0x00, 0x80, 0x03, 0xE0, 0x03, 0xFF, 0xFF, 0xE0, 0x01, 0xE0, 0xFF, 0xC0, 0x01, 0xFF, 0xCF, 0xFF, 0xFF, 0xF0, 0x01, 0xE0, 0xFF, 0xFE, 0x00, 0xF8, 0x0F, 0xC0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x3F, 0x83, 0xFF, 0x01, 0xFF, 0x80, 0x00, 0x00, 0x7F, 0xFE, 0x00, 0x00, 0x0F, 0x80, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x61, 0xFF, 0x80, 0x01, 0xFF, 0x7F, 0xFF, 0xFF, 0xE0, 0x03, 0xC0, 0xFF, 0xFE, 0x01, 0xF0, 0x1F, 0x80, 0x01, 0xFF, 0xFF, 0xF0, 0x00, 0x7F, 0x03, 0xFF, 0x03, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFC, 0x02, 0x00, 0x1F, 0x00, 0x07, 0xFF, 0xFF, 0xC0, 0x01, 0x73, 0xFF, 0xC0, 0x07, 0xFE, 0xFF, 0xFF, 0xFF, 0xC0, 0x07, 0x81, 0xFF, 0xF8, 0x03, 0xE0, 0x3F, 0x00, 0x03, 0xFF, 0xFF, 0xC0, 0x00, 0xFC, 0x0F, 0xFC, 0x07, 0xFF, 0x00, 0x00, 0x01, 0xFF, 0xF8, 0x00, 0x00, 0x7C, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x06, 0x07, 0xFF, 0x00, 0x07, 0xF8, 0x7F, 0xDF, 0xFF, 0xC0, 0x07, 0x81, 0xFF, 0xF8, 0x03, 0xE0, 0x7F, 0x00, 0x03, 0xFF, 0xFF, 0xC0, 0x00, 0xF8, 0x07, 0xFC, 0x07, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xF8, 0x18, 0x00, 0xFE, 0x00, 0x1F, 0xFF, 0xFE, 0x00, 0x03, 0xC7, 0xFF, 0x00, 0x1F, 0xFB, 0xFF, 0xFF, 0xFF, 0x80, 0x0F, 0x03, 0xFF, 0xF0, 
0x07, 0xC0, 0x7E, 0x00, 0x07, 0xFF, 0xFF, 0xC0, 0x01, 0xF8, 0x1F, 0xF8, 0x0F, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xF0, 0x10, 0x00, 0xFC, 0x00, 0x1F, 0xFF, 0xFE, 0x00, 0x07, 0x07, 0xFE, 0x00, 0x0F, 0xF0, 0x7F, 0xBF, 0xFF, 0x80, 0x0F, 0x07, 0xFF, 0xF0, 0x0F, 0x80, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x80, 0x01, 0xF0, 0x0F, 0xF8, 0x0F, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xE0, 0x1C, 0x00, 0x7C, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x0E, 0x1F, 0xFE, 0x00, 0x3F, 0xF7, 0xFF, 0xFF, 0xFE, 0x00, 0x3C, 0x0F, 0xFF, 0xC0, 0x1F, 0x01, 0xF8, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x07, 0xC0, 0x3F, 0xE0, 0x3F, 0xF0, 0x00, 0x00, 0x07, 0xFF, 0xC0, 0xE0, 0x00, 0xF8, 0x00, 0xFF, 0xFF, 0xFC, 0x00, 0x1E, 0x9F, 0xFC, 0x00, 0x3F, 0xE3, 0xFF, 0xFF, 0xFE, 0x00, 0x3C, 0x0F, 0xFF, 0xC0, 0x1F, 0x01, 0xF8, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x07, 0xE0, 0x1F, 0xE0, 0x3F, 0xF8, 0x00, 0x00, 0x07, 0xFF, 0xC0, 0x70, 0x03, 0xF0, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x3C, 0x7F, 0xF8, 0x00, 0x3F, 0xCF, 0xFF, 0x9F, 0xFF, 0x80, 0x1F, 0x07, 0xFF, 0xF0, 0x0F, 0x80, 0xFE, 0x00, 0x07, 0xFF, 0xFF, 0x80, 0x01, 0xF0, 0x1F, 0xF8, 0x0F, 0xFC, 0x00, 0x00, 0x01, 0xFF, 0xE0, 0x38, 0x00, 0xFC, 0x00, 0x1F, 0xFF, 0xFF, 0x00, 0x07, 0xCF, 0xFE, 0x00, 0x3F, 0xEF, 0xFF, 0xFF, 0xFF, 0x00, 0x3E, 0x0F, 0xFF, 0xE0, 0x1F, 0x00, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x03, 0xF0, 0x3F, 0xF0, 0x3F, 0xF8, 0x00, 0x00, 0x07 ), "UH": ( 0x00, 0x7F, 0xFF, 0xFF, 0xC0, 0xE0, 0x0C, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x1C, 0x7F, 0x0F, 0xFF, 0xDF, 0xC0, 0x00, 0x01, 0xFF, 0xFF, 0xE0, 0x00, 0x1F, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x83, 0xF0, 0x7C, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x1C, 0x01, 0x80, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x87, 0xE0, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF8, 0x7E, 0x07, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 0x00, 0x20, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x30, 0xFE, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFE, 0x1F, 0x81, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE0, 0xE0, 0x06, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0F, 0x1F, 0xC7, 0xFF, 0xFF, 0x38, 0x00, 0x00, 0x7F, 0xFF, 0xE0, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFE, 0xFF, 0xFF, 0x83, 0xE0, 0x7E, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x18, 0x01, 0x80, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x03, 0xC7, 0xE0, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x1F, 0xFF, 0xFE, 0x00, 0x03, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF0, 0x7E, 0x0F, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x07, 0x00, 0x20, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xE0, 0xFC, 0x3F, 0xFF, 0xFF, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF8, 0x1F, 0x03, 0xE0, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x81, 0x80, 0x18, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x38, 0x3F, 0x0F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFE, 0x0F, 0x81, 0xF0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x1F, 0x1F, 0xC7, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x7F, 0xFF, 0xF0, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFE, 0x0F, 0x81, 0xF8, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0x0F, 0xC3, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x7F, 0xFF, 0xF8, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x03, 
0xE0, 0x7C, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0x07, 0xE1, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x1F, 0xFF, 0xFE, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFD, 0xFF, 0xFF, 0x03, 0xE0, 0xFC, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF0, 0x70, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x0F, 0x0F, 0xC1, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x3F, 0xFF, 0xF8, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x07, 0xC0, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0E, 0x0F, 0xC3, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x83, 0xE0, 0x7C, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xF8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x03, 0x07, 0xF3, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x03, 0xFF, 0xFF, 0x00, 0x00, 0x01, 0xF9, 0xFF, 0xFF, 0x07, 0xC0, 0xFC, 0x00, 0x00, 0x07, 0x7F, 0xFF, 0xF0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x06, 0x0F, 0x87, 0xFF, 0xFF, 0xF3, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xE3, 0xFF, 0xFC, 0x1F, 0x03, 0xF8 ), "AE": ( 0x01, 0xFF, 0xFF, 0x01, 0x63, 0xF0, 0x03, 0xF1, 0xFF, 0xBF, 0xF0, 0x03, 0xF8, 0x0F, 0xE0, 0xFE, 0x00, 0xFF, 0xE0, 0x0F, 0xC1, 0xFE, 0x03, 0xFC, 0x00, 0x01, 0x01, 0xFC, 0x07, 0xF0, 0x00, 0x03, 0xC0, 0x00, 0x1F, 0xE0, 0x07, 0xC0, 0x06, 0x00, 0x7F, 0x00, 0x7C, 0x3E, 0x00, 0x01, 0xF8, 0x01, 0xFF, 0xFF, 0xFF, 0xF0, 0x03, 0xF8, 0x0F, 0xE0, 0xFE, 0x00, 0xFB, 0xE0, 0x0F, 0xC0, 0xFE, 0x03, 0xFC, 0x00, 0x03, 0x00, 0xFC, 0x07, 0xF0, 0x00, 0x01, 0xE0, 0x00, 0x1F, 0xE0, 0x1C, 0x00, 0x00, 0x00, 0x7F, 0x80, 0xFF, 0xFF, 0x00, 0x03, 0xFC, 0x00, 0xFE, 0xFF, 0x9F, 0xFC, 0x01, 0xFC, 0x07, 0xF0, 0x7F, 0x00, 0x7D, 0xF8, 0x07, 0xE0, 0xFF, 0x01, 0xFE, 0x00, 0x03, 0x80, 0xFE, 0x03, 0xFC, 0x00, 0x00, 0xC0, 0x00, 0x07, 0xF8, 0x00, 0x0F, 0x80, 0x00, 0x3F, 0xE0, 0x00, 0xFF, 0x80, 0x00, 0x3F, 0x00, 0x3F, 0xFF, 0x1F, 0xF8, 0x01, 0xFC, 0x07, 0xF0, 0x7F, 0x00, 0xFF, 0xF0, 0x07, 0xE0, 0xFF, 0x01, 0xFE, 0x00, 0x03, 0x80, 0xFE, 0x03, 0xF8, 0x00, 0x00, 0x60, 0x08, 0x0F, 0xF0, 0x07, 0xF8, 0x03, 0xC0, 0x7F, 0x80, 0x7F, 0xF7, 0x80, 0x08, 0xF8, 0x01, 0xFF, 0xFF, 0xFF, 0xE0, 0x0F, 0xE0, 0x1F, 0x83, 0xFC, 0x03, 0xEF, 0xC0, 0x3F, 0x03, 0xF8, 0x0F, 0xF0, 0x00, 0x1E, 0x07, 0xF0, 0x3F, 0xC0, 0x00, 0x0F, 0x03, 0xC0, 0xFF, 0x01, 0xE0, 0x04, 0x00, 0x00, 0xFC, 0x03, 0xE1, 0xF8, 0x00, 0x00, 0x30, 0x03, 0xE7, 0xFF, 0xFF, 0x00, 0x7F, 0x80, 0xFE, 0x1F, 0xC0, 0x1F, 0x7F, 0x00, 0xFC, 0x1F, 0xC0, 0x7F, 0x80, 0x01, 0xF0, 0x3F, 0x80, 0xFF, 0x00, 0x00, 0x30, 0x04, 0x01, 0xF8, 0x07, 0x80, 0x70, 0x00, 0x03, 0xF0, 0x1F, 0x9F, 0xF0, 0x06, 0x08, 0x00, 0x1F, 0xFF, 0xFF, 0xF8, 0x01, 0xFC, 0x07, 0xE0, 0xFF, 0x00, 0xF3, 0xF8, 0x07, 0xE0, 0xFE, 0x01, 0xFE, 0x00, 0x07, 0x80, 0xFE, 0x07, 0xF8, 0x00, 0x03, 0xC0, 0x78, 0x1F, 0xE0, 0x3E, 0x03, 0x00, 0x00, 0x7F, 0x80, 0xF0, 0x7F, 0x80, 0x02, 0x80, 0x00, 0x79, 0xFF, 0xF3, 0xF0, 0x07, 0xF0, 0x1F, 0x83, 0xFC, 0x03, 0x8F, 0xE0, 0x1F, 0x83, 0xF8, 0x0F, 0xF8, 0x00, 0x1F, 0x07, 0xF0, 0x1F, 0xE0, 0x00, 0x0E, 0x01, 0x00, 0x7F, 0x00, 0x70, 0x00, 0x00, 0x00, 0xFE, 0x01, 0xF8, 0x7F, 0x00, 0x08, 0x70, 0x07, 0xFF, 0xFF, 0xFF, 0x80, 0x1F, 0x80, 0x7E, 0x0F, 0xE0, 0x0E, 0x1F, 0x00, 0xFE, 0x0F, 0xE0, 0x3F, 0xF0, 0x00, 0xF8, 0x1F, 0xC0, 0xFF, 0x00, 0x00, 0x70, 0x1E, 0x01, 0xFC, 0x0F, 0x00, 0xF8, 0x00, 0x03, 0xFC, 0x1E, 0x0F, 0xE0, 0x00, 0x00, 0x80, 0x0F, 0xFF, 0xFF, 0xF0, 0x01, 0xFC, 0x07, 0xE0, 0xFF, 
0x00, 0xE1, 0xF8, 0x07, 0xE0, 0xFE, 0x03, 0xFF, 0x00, 0x0F, 0x80, 0xFC, 0x0F, 0xF8, 0x00, 0x03, 0xC1, 0xE0, 0x1F, 0x80, 0xF0, 0x07, 0x80, 0x00, 0x7F, 0x80, 0x20, 0x7F, 0x00, 0x00, 0x03, 0x00, 0x3F, 0xFF, 0xFF, 0x00, 0x3F, 0x80, 0xFE, 0x1F, 0xE0, 0x3F, 0x3F, 0x00, 0xFC, 0x3F, 0xC0, 0x7F, 0xC0, 0x00, 0xF0, 0x3F, 0x80, 0xFF, 0x00, 0x00, 0x70, 0x07, 0x00, 0xFF, 0xC0, 0x00, 0xFC, 0x00, 0x03, 0xFC, 0x38, 0x3F, 0xF0, 0x00, 0x07, 0xE0, 0x03, 0xFF, 0xFE, 0x00, 0x3F, 0x80, 0xFE, 0x0F, 0xE0, 0x1F, 0xFF, 0x00, 0xFC, 0x1F, 0xC0, 0x3F, 0xC0, 0x00, 0x70, 0x1F, 0x80, 0xFF, 0x00, 0x00, 0x3C, 0x00, 0x00, 0xFF, 0xC0, 0x0F, 0xFE, 0x00, 0x07, 0xF8, 0x03, 0xFF, 0xF0, 0x00, 0x3B, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0xFF, 0x01, 0xF8, 0x3F, 0xC0, 0x7F, 0xFC, 0x01, 0xF8, 0x7F, 0x80, 0xFF, 0x00, 0x00, 0xF0, 0x7F, 0x01, 0xFE, 0x00, 0x00, 0x78, 0x00, 0x01, 0xFF, 0x00, 0x7F, 0x8F, 0x00, 0x1F, 0xE0, 0x0C, 0xFF, 0xE0, 0x00, 0x3F, 0x00, 0x7F, 0xFF, 0xF0, 0x03, 0xFC, 0x07, 0xE0, 0xFF, 0x00, 0xFF, 0xF0, 0x07, 0xC1, 0xFE, 0x03, 0xFC, 0x00, 0x03, 0xC1, 0xFC, 0x07, 0xF8, 0x00, 0x03, 0xE0, 0x00, 0x0F, 0x81, 0xE0, 0x0F, 0xD0, 0x00, 0x7E, 0x03, 0xF0, 0xFF, 0x00, 0x00, 0xCE, 0x0F ), "OH": ( 0xFF, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF8, 0x00, 0x03, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x1F, 0xFE, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x7F, 0xFE, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x03, 0xE0, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00, 0x0F, 0xBF, 0xFC, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xFF, 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xFE, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x7F, 0xFE, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xF0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x80, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x07, 0xFF, 0xC0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x07, 0xF8, 0x02, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xC0, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x03, 0x87, 0xC8, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0xFC, 0x01, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE0, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFE, 0x00, 0x07, 0x07, 0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x7F, 0x00, 0xC0, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x03, 0xF8, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x7F, 0xF8, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x01, 0xFC, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x1F, 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x7F, 0xFC, 0x00, 0x00, 0x00, 
0x3F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x01, 0xFC, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x00, 0x01, 0xC1, 0xFF, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x7C, 0x01, 0x80, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x02, 0x00, 0x00, 0x00, 0x07, 0xF0, 0x3C, 0x0F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x7C, 0x01, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xE0, 0x18, 0x07, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0xE3, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x80, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x08, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE0, 0x00, 0x01, 0xE0, 0xFF, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x7C, 0x01, 0x00, 0x00, 0x03, 0xFF ), "EH": ( 0xFF, 0x80, 0x00, 0x00, 0xC0, 0x0F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xC7, 0xF0, 0x00, 0x06, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x7C, 0x07, 0x03, 0xF3, 0xFC, 0x1F, 0xC0, 0x01, 0x80, 0x20, 0x00, 0x7F, 0x81, 0xFF, 0xE0, 0x60, 0x00, 0x38, 0x01, 0xFF, 0xFF, 0xFD, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xDF, 0xF8, 0x7C, 0x00, 0x06, 0x00, 0x7F, 0xFF, 0xFE, 0xFF, 0xF8, 0x00, 0x1F, 0x01, 0xC1, 0xFC, 0xFF, 0x07, 0xF0, 0x00, 0x60, 0x00, 0x00, 0x1F, 0xF0, 0x7F, 0xF8, 0x18, 0x00, 0x1E, 0x00, 0xFF, 0xFF, 0xFF, 0x7F, 0xC0, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFC, 0x07, 0xC0, 0x03, 0xE0, 0x7F, 0xFF, 0xFF, 0x87, 0xFC, 0x00, 0x07, 0xC0, 0x78, 0x7F, 0x1F, 0x81, 0xFC, 0x00, 0x0C, 0x00, 0x00, 0x03, 0xFC, 0x1F, 0xFE, 0x00, 0x00, 0x03, 0x80, 0x3F, 0xFF, 0xFF, 0x9F, 0xFC, 0x00, 0x00, 0x00, 0x03, 0xFF, 0x87, 0xFF, 0xFF, 0x00, 0x7E, 0x00, 0x1D, 0xFF, 0xFF, 0x3F, 0xF8, 0x00, 0x00, 0xF8, 0x0E, 0x1F, 0xC7, 0xF0, 0x7F, 0x80, 0x07, 0x00, 0x00, 0x00, 0xFF, 0x07, 0xFF, 0x80, 0x80, 0x00, 0xE0, 0x0F, 0xFF, 0xDF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x18, 0x1F, 0xFF, 0xFF, 0xFC, 0x1E, 0x00, 0x07, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0xFC, 0x0F, 0x07, 0xE3, 0xF8, 0x3F, 0xC0, 0x03, 0x00, 0x00, 0x00, 0x7F, 0x83, 0xFF, 0xC0, 0xC0, 0x00, 0xF0, 0x07, 0xFF, 0xFF, 0xF9, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF0, 0xFE, 0x00, 0x01, 0x40, 0xFF, 0xFF, 0xFE, 0x7F, 0xF8, 0x00, 0x1F, 0x01, 0xC1, 0xFC, 0xFF, 0x07, 0xF8, 0x00, 0x60, 0x10, 0x00, 0x1F, 0xF0, 0xFF, 0xF8, 0x18, 0x00, 0x1E, 0x00, 0xFF, 0xFB, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFE, 0x0F, 0xC0, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x01, 0xF0, 0x1C, 0x1F, 0x8F, 0xE0, 0xFF, 0x00, 0x06, 0x00, 0x00, 0x01, 0xFE, 0x0F, 0xFF, 0x00, 0x00, 0x00, 0xC0, 0x0F, 0xFF, 0xBF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xE1, 0xFC, 0x00, 0x00, 0x03, 0xFF, 0x7F, 0xFF, 0xFF, 0xE0, 0x00, 0x0F, 0x81, 0xE0, 0xFC, 0x7F, 0x07, 0xF8, 0x00, 0x60, 0x00, 0x00, 0x0F, 0xF0, 0x7F, 0xF8, 0x18, 0x00, 0x0E, 0x00, 0xFF, 0xFF, 0xFF, 0x3F, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFE, 0x1F, 0xE0, 0x00, 0x18, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, 0xF0, 0x3C, 0x1F, 0x8F, 0xE0, 0xFF, 0x00, 0x0E, 0x00, 0x00, 0x01, 0xFE, 0x0F, 0xFF, 0x01, 0x00, 0x00, 0x80, 0x0F, 0xFF, 0x9F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0x81, 0xF8, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x1F, 0x01, 0xC1, 0xFC, 0xFF, 0x07, 0xF8, 0x00, 0x60, 0x00, 0x00, 0x1F, 0xE0, 
0xFF, 0xF8, 0x18, 0x00, 0x1E, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x0F, 0xFE, 0x0F, 0xFF, 0xFC, 0x00, 0xFC, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x07, 0xC0, 0xF0, 0xFE, 0x3F, 0x83, 0xFC, 0x00, 0x38, 0x00, 0x00, 0x07, 0xF8, 0x3F, 0xFE, 0x04, 0x00, 0x06, 0x00, 0x7F, 0xFC, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFB, 0xFF, 0x03, 0xF0, 0x00, 0x18, 0x1F, 0xF8, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0xF8, 0x1E, 0x1F, 0xC7, 0xF0, 0x7F, 0x80, 0x07, 0x00, 0x00, 0x00, 0xFF, 0x07, 0xFF, 0x80, 0x00, 0x00, 0xC0, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x08, 0x00, 0x1F, 0xFE, 0x7F, 0x87, 0xF0, 0x00, 0x02, 0x03, 0xFF, 0xFF, 0xEF, 0xFF, 0xC0, 0x00, 0x3E, 0x07, 0x83, 0xF1, 0xFC, 0x1F, 0xE0, 0x00, 0xC0, 0x00, 0x00, 0x3F, 0xC1, 0xFF, 0xF0, 0x30, 0x00, 0x1C, 0x01, 0xFF, 0xFB, 0xFD, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x3F, 0x9C, 0x7F, 0xFF, 0xF0, 0x03, 0xE0, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x1F, 0x01, 0xC1, 0xFC, 0x7F, 0x0F, 0xF0, 0x00, 0xE0, 0x00 ), "OO": ( 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x07, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x0F, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x40, 0x1E, 0x3F, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0x70, 0x06, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x07, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x03, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x73, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE1, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xC7, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF3, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x7F, 0xFF, 0xD0, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x79, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00 ), "IH": ( 0xF8, 0x03, 0xFC, 0x01, 0xC0, 0x00, 0x00, 0xFC, 0x00, 0xFF, 0xC0, 0x7F, 0xE0, 0x03, 0xFC, 0x00, 0x1F, 0xE0, 0x0F, 0xFE, 0x07, 0xFF, 0x80, 0x3F, 0x00, 0xF8, 0x3C, 0x07, 0xE1, 0xFE, 0x0F, 0xFC, 0x03, 0xFC, 0x00, 0x3E, 0x01, 0xC0, 0xFF, 0xFC, 0x1F, 0xF0, 0x0F, 0xF0, 0x06, 0x00, 0x00, 0x00, 0x3F, 0x00, 0xFF, 0x80, 0xFF, 0xE0, 0x0F, 0x81, 0xC0, 0x0F, 0xE0, 0x1F, 0xF8, 0x07, 0xFE, 0x00, 0xFC, 0x03, 0xE0, 0x60, 0x3F, 0x87, 0xF0, 0x7F, 0xE0, 0x0F, 0xF0, 0x01, 0xF0, 0x0F, 0x01, 0xFF, 0xE0, 0x7F, 0xC0, 0x3F, 0xC0, 0x18, 0x00, 0x00, 0x01, 0xF8, 0x03, 0xFE, 0x03, 0xFF, 0x00, 0x3F, 0x80, 0x00, 0x7F, 0x80, 0xFF, 0xF0, 0x3F, 0xFC, 0x00, 0xFC, 0x07, 0xC1, 0xE0, 0x7F, 0x0F, 0xF0, 0xFF, 0xE0, 0x1F, 0xE0, 0x01, 0xF0, 0x0F, 0x03, 0xFF, 0xE0, 0xFF, 0x80, 0x3F, 0xC0, 0x00, 0x00, 0x00, 0x1F, 0xE0, 0x0F, 0xFC, 0x07, 0xFF, 0x00, 0x3F, 0xE0, 0x00, 0xFE, 0x00, 0xFF, 0xC0, 0x7F, 0xF8, 0x01, 0xF0, 0x0F, 0x83, 0xC0, 0xFE, 0x1F, 0xC0, 0xFF, 0xC0, 0x3F, 0xC0, 0x03, 0xE0, 0x3C, 0x0F, 0xFF, 0x81, 0xFF, 0x00, 0x7F, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x80, 0x1F, 0xF0, 0x0F, 0xFC, 0x00, 0x7F, 0xE0, 0x03, 0xFE, 0x03, 0xFF, 0x81, 0xFF, 0x80, 0x1F, 0x80, 0xFC, 0x0C, 0x07, 0xE0, 0xFE, 0x0F, 0xFC, 0x03, 0xFC, 0x00, 0x7E, 0x01, 0xE0, 0x7F, 0xFC, 0x1F, 0xF0, 0x07, 0xF0, 0x06, 0x00, 0x00, 0x01, 0xFC, 0x01, 0xFF, 0x00, 0xFF, 0x00, 0x0E, 0x7F, 0x00, 0x1F, 0xC0, 0x3F, 0xF8, 0x0F, 0xF8, 0x01, 0xF8, 0x07, 0xC0, 0xC0, 0x7F, 0x0F, 0xE0, 0xFF, 0xC0, 0x3F, 0x80, 0x03, 0xE0, 0x3E, 0x07, 0xFF, 0xC1, 0xFF, 0x00, 0xFF, 0x00, 0xE0, 0x00, 0x00, 0x0F, 0xC0, 0x1F, 0xF8, 0x0F, 0xF8, 0x00, 0xFE, 0x00, 0x01, 0xFC, 0x01, 0xFF, 0x80, 0x7F, 0xE0, 0x03, 0xF0, 0x0F, 0x87, 0x80, 0xFE, 0x3F, 0xC1, 0xFF, 0x80, 0x7F, 0x80, 0x07, 0xC0, 0x78, 0x1F, 0xFF, 0x83, 0xFE, 0x00, 0xFF, 0x00, 0x80, 0x80, 0x00, 0x0F, 0xC0, 0x1F, 0xF0, 0x0F, 0xF8, 0x01, 0xF1, 0xFC, 0x03, 0xFE, 0x01, 0xFF, 0x80, 0xFF, 0xC0, 0x07, 0xC0, 0x3E, 0x07, 0x01, 0xF8, 0x7F, 0x03, 0xFF, 0x00, 0xFF, 0x00, 0x1F, 0x00, 0xF0, 0x3F, 0xFF, 0x07, 0xF8, 0x03, 0xFC, 0x03, 0x80, 0x00, 0x00, 0x1F, 0x80, 0x3F, 0xE0, 0x3F, 0xF0, 0x03, 0xC1, 0xF8, 0x07, 0xFC, 0x03, 0xFE, 0x03, 0xFF, 0xC0, 0x1F, 0x80, 0xF8, 0x1C, 0x07, 0xE1, 0xFE, 0x0F, 0xFC, 0x03, 0xF8, 0x00, 0x7E, 0x03, 0xC0, 0xFF, 0xF8, 0x1F, 0xF0, 0x07, 0xF0, 0x0C, 0x00, 0x00, 0x00, 0x7F, 0x00, 0xFF, 0xC0, 0x7F, 0xE0, 0x1F, 0xC7, 0xE0, 0x0F, 0xF0, 0x1F, 0xFE, 0x07, 0xFC, 0x00, 0xFC, 0x07, 0xC0, 0xC0, 0x7F, 0x0F, 0xF0, 0x7F, 0xE0, 0x1F, 0xC0, 0x03, 0xF0, 0x1F, 0x07, 0xFF, 0xC0, 0xFF, 0x00, 0x7F, 0x00, 0xF0, 0x00, 0x00, 0x01, 0xF8, 0x03, 0xFC, 0x07, 0xFE, 0x03, 0xF8, 0x1F, 0x80, 0xFF, 0x00, 0x7F, 0xE0, 0x7F, 0xC0, 0x03, 0xF0, 0x1F, 0x03, 0x80, 0xFE, 0x3F, 0x81, 0xFF, 0x80, 0x7F, 0x00, 0x07, 
0xC0, 0x7C, 0x0F, 0xFF, 0x03, 0xFC, 0x00, 0xFF, 0x00, 0xC0, 0x00, 0x00, 0x03, 0xF0, 0x0F, 0xF8, 0x0F, 0xFC, 0x03, 0xF0, 0x7F, 0x01, 0xFF, 0x00, 0xFF, 0xC0, 0xFF, 0xCC, 0x01, 0xF8, 0x0F, 0x83, 0xC0, 0xFF, 0x1F, 0xC1, 0xFF, 0xC0, 0x3F, 0x80, 0x03, 0xE0, 0x3C, 0x0F, 0xFF, 0x83, 0xFE, 0x00, 0x7F, 0x00, 0x80, 0x00, 0x00, 0x07, 0xF0, 0x0F, 0xFC, 0x0F, 0xFE, 0x01, 0xF8, 0x7E, 0x01, 0xFF, 0x01, 0xFF, 0xC0, 0xFF, 0xE0, 0x01, 0xF8, 0x0F, 0x81, 0xC0, 0xFF, 0x1F, 0xC0, 0xFF, 0xC0, 0x3F, 0x80, 0x03, 0xE0, 0x1E, 0x07, 0xFF, 0x81, 0xFF, 0x00, 0xFF, 0x00, 0x60, 0x00, 0x00, 0x03, 0xF8, 0x07, 0xFC, 0x07, 0xFF, 0x01, 0xF8, 0x3F, 0x00, 0xFF, 0x00, 0xFF, 0xC0, 0x3F, 0xF8, 0x00, 0xFC, 0x07, 0xC1, 0xE0, 0x7F, 0x0F, 0xE0, 0xFF, 0xE0, 0x1F, 0xC0, 0x01, 0xF0, 0x0E, 0x03, 0xFF, 0xC0, 0xFF, 0x00, 0x3F, 0x80 ), "EE": ( 0x00, 0x03, 0xF0, 0x1F, 0x80, 0x3F, 0xE0, 0x0F, 0xE0, 0x1F, 0xF0, 0x7F, 0xFF, 0x01, 0xFE, 0x00, 0x07, 0xE0, 0x0F, 0xE0, 0x0F, 0xF0, 0x07, 0xFC, 0x07, 0xF8, 0x01, 0xFF, 0x00, 0xFE, 0x00, 0x7F, 0x00, 0x1F, 0xC0, 0x7F, 0xC0, 0x3F, 0xF8, 0x0F, 0xFE, 0x0C, 0x03, 0xF0, 0x1F, 0x80, 0x3F, 0xC0, 0x0F, 0xE0, 0x0F, 0xF0, 0x3F, 0xFF, 0x01, 0xFE, 0x00, 0x07, 0xE0, 0x0F, 0xE0, 0x0F, 0xF0, 0x03, 0xFC, 0x07, 0xF8, 0x01, 0xFF, 0x00, 0x7F, 0x00, 0x3F, 0x80, 0x0F, 0xE0, 0x3F, 0xE0, 0x0F, 0xF8, 0x07, 0xFC, 0x0F, 0xC0, 0x7E, 0x03, 0xF0, 0x07, 0xFC, 0x01, 0xFE, 0x03, 0xFE, 0x0F, 0xFF, 0xF0, 0x3F, 0x80, 0x00, 0xFC, 0x00, 0xFC, 0x01, 0xFE, 0x00, 0x7F, 0x00, 0xFF, 0x00, 0x7F, 0xC0, 0x07, 0xE0, 0x03, 0xF8, 0x01, 0xFC, 0x03, 0xFE, 0x03, 0xFF, 0x80, 0xFF, 0x03, 0xF8, 0x0F, 0xC0, 0x7E, 0x00, 0xFF, 0x00, 0x7F, 0x80, 0x7F, 0x81, 0xFF, 0xFE, 0x07, 0xF0, 0x00, 0x1F, 0x80, 0x3F, 0x80, 0x7F, 0x80, 0x1F, 0xE0, 0x1F, 0xE0, 0x1F, 0xE3, 0x00, 0xFC, 0x00, 0x7E, 0x00, 0x7F, 0x80, 0x7F, 0x80, 0x7F, 0xE0, 0x3F, 0xE0, 0x7E, 0x01, 0xF8, 0x1F, 0xC0, 0x3F, 0xE0, 0x0F, 0xF0, 0x0F, 0xF0, 0x3F, 0xFF, 0x81, 0xFE, 0x00, 0x03, 0xF0, 0x07, 0xE0, 0x0F, 0xF0, 0x03, 0xFC, 0x07, 0xFC, 0x01, 0xFF, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0x0F, 0xF0, 0x0F, 0xF0, 0x1F, 0xFC, 0x07, 0xFC, 0x07, 0xC0, 0x7E, 0x03, 0xF0, 0x07, 0xF8, 0x01, 0xFC, 0x03, 0xFC, 0x0F, 0xFF, 0xE0, 0x7F, 0x80, 0x00, 0xFC, 0x01, 0xFC, 0x01, 0xFC, 0x00, 0xFF, 0x01, 0xFF, 0x00, 0x7F, 0x80, 0x0F, 0xC0, 0x07, 0xF0, 0x03, 0xFC, 0x03, 0xFC, 0x03, 0xFF, 0x01, 0xFF, 0x80, 0x00, 0x7E, 0x03, 0xF0, 0x07, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x07, 0xFF, 0xE0, 0x3F, 0xC0, 0x00, 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x00, 0xFF, 0x80, 0xFF, 0x80, 0x7F, 0xE0, 0x07, 0xE0, 0x03, 0xF0, 0x01, 0xFC, 0x03, 0xFE, 0x01, 0xFF, 0x01, 0xFF, 0x81, 0xF0, 0x1F, 0x00, 0xFC, 0x01, 0xFE, 0x00, 0xFF, 0x00, 0xFF, 0x03, 0xFF, 0xF8, 0x0F, 0xE0, 0x00, 0x3F, 0x00, 0x7F, 0x00, 0xFF, 0x00, 0x3F, 0xC0, 0x7F, 0xC0, 0x1F, 0xE0, 0x03, 0xF0, 0x01, 0xFC, 0x00, 0xFF, 0x00, 0xFF, 0x80, 0xFF, 0xC0, 0x7F, 0xE0, 0x00, 0x3F, 0x00, 0xFC, 0x01, 0xFF, 0x00, 0x7F, 0x00, 0xFF, 0x01, 0xFF, 0xF8, 0x0F, 0xE0, 0x00, 0x3F, 0x00, 0x7F, 0x00, 0xFF, 0x80, 0x3F, 0xC0, 0x3F, 0xE0, 0x0F, 0xF0, 0x03, 0xF0, 0x01, 0xFC, 0x00, 0xFE, 0x01, 0xFF, 0x80, 0x7F, 0xC0, 0xFF, 0xE0, 0x38, 0x0F, 0xC0, 0x7F, 0x00, 0xFF, 0x00, 0x3F, 0x80, 0x7F, 0x80, 0xFF, 0xFE, 0x07, 0xF8, 0x00, 0x0F, 0x80, 0x3F, 0x80, 0x3F, 0xC0, 0x0F, 0xF0, 0x1F, 0xF0, 0x0F, 0xF0, 0x01, 0xF8, 0x00, 0x7E, 0x00, 0x3F, 0x80, 0x7F, 0xE0, 0x3F, 0xF0, 0x3F, 0xF8, 0x38, 0x07, 0xE0, 0x3F, 0x00, 0x7F, 0x80, 0x1F, 0xC0, 0x1F, 0xE0, 0x7F, 0xFF, 0x03, 0xFC, 0x00, 0x07, 0xC0, 0x1F, 0xC0, 0x1F, 0xE0, 0x07, 0xF8, 0x0F, 0xF8, 0x03, 0xFC, 0x00, 0xFC, 0x00, 0x3E, 0x00, 0x1F, 0xC0, 0x3F, 0xE0, 0x7F, 0xF0, 0x1F, 0xF8, 0x07, 0x03, 0xF0, 0x1F, 0x80, 
0x3F, 0xC0, 0x0F, 0xE0, 0x1F, 0xE0, 0x7F, 0xFF, 0x81, 0xFE, 0x00, 0x07, 0xE0, 0x0F, 0xE0, 0x1F, 0xF0, 0x03, 0xFC, 0x07, 0xF8, 0x03, 0xFC, 0x00, 0x7E, 0x00, 0x1F, 0x00, 0x0F, 0xE0, 0x1F, 0xF0, 0x0F, 0xF8, 0x0F, 0xFE, 0x0F, 0x01, 0xF0, 0x0F, 0xC0, 0x1F, 0xE0, 0x07, 0xF0, 0x0F, 0xF0, 0x3F, 0xFF, 0x80, 0xFE, 0x00, 0x03, 0xF0, 0x03, 0xF0, 0x0F, 0xF8, 0x03, 0xFE, 0x03, 0xFE, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0x02, 0x90, 0x03, 0xF8, 0x0F, 0xFE, 0x0F, 0xFC, 0x03, 0xFF, 0x00, 0x03, 0xF0, 0x1F, 0x80, 0x3F, 0xE0, 0x0F, 0xF0, 0x0F, 0xF0, 0x3F, 0xFF, 0x80, 0xFF, 0x00, 0x07, 0xE0, 0x07, 0xE0, 0x0F, 0xF0, 0x03, 0xFC, 0x07, 0xFC, 0x01, 0xFC, 0x00, 0x7E, 0x00, 0x1F, 0x00, 0x0F, 0xE0, 0x1F, 0xF8, 0x1F, 0xFC, 0x0F ), "WH": ( 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xC0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x07, 0xC0, 0x00, 0x00, 0x00, 0x07, 0xE3, 0x8F, 0xFF, 0xE0, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1E, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xB1, 0xE3, 0xFF, 0xFF, 0xFF, 0xFF, 0xC6, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xE7, 0x00, 0x01, 0xE7, 0xFF, 0xFE, 0xBF, 0x39, 0xFF, 0xF7, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x1E, 0x8F, 0x81, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x3F, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFE, 0xE0, 0x00, 0x00, 0x00, 0x18, 0x07, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFC, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFD, 0xE0, 0xEF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xE0, 0x30, 0x07, 0x83, 0xF0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x00, 0x1E, 0x0F, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x1F, 0xFF, 0xFF, 0xE1, 0xF0, 0x04, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x27, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x07, 0xD9, 0xFF, 0x60, 0x08, 0x00, 0x00, 0x01, 0xE7, 0xFF, 0xFF, 0xC0, 0x30, 0x07, 0x80, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x07, 0xCF, 0xFF, 0xC1, 0xF0, 0x3F, 0x83, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x80, 0x00, 0x23, 0x07, 0xC0, 
0x00, 0x00, 0x7F, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xFF, 0xF7, 0x80, 0x00, 0x00, 0x31, 0x40, 0x00, 0x00, 0x07, 0xFF, 0xEF, 0x80, 0x00, 0x00, 0x07, 0xFF, 0xFF, 0xDC, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x39, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x38, 0x77, 0xC0, 0x00, 0x00, 0x00, 0x73, 0xE6, 0x08, 0x00, 0x08, 0x22, 0x00, 0x00, 0x08, 0x33, 0xFF, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x01, 0x78, 0x00, 0x00, 0x07, 0xFF, 0xF8, 0x00, 0x00, 0x03, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x9E, 0xFE, 0x18, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x04, 0x10, 0x44, 0x00, 0x00, 0x00, 0x02 ), "CH": ( 0x0F, 0xC0, 0x3F, 0x07, 0xC3, 0x9F, 0x07, 0xE0, 0x07, 0x80, 0x7E, 0x1F, 0x78, 0x7E, 0x00, 0xF8, 0x1F, 0xC3, 0xF8, 0x7F, 0x0E, 0x3F, 0x07, 0x0F, 0x03, 0xF0, 0x00, 0xF0, 0xE3, 0x83, 0xE0, 0x78, 0xF0, 0xF8, 0x03, 0xC0, 0xF8, 0xE1, 0xFF, 0x07, 0x87, 0x87, 0xC3, 0xC3, 0x0F, 0xE0, 0xF1, 0xC0, 0xFE, 0x60, 0x0F, 0x80, 0xF8, 0xF8, 0x7C, 0x78, 0x7C, 0x70, 0xF8, 0x77, 0xF0, 0xC7, 0xE0, 0x7C, 0x7C, 0x00, 0xF8, 0x83, 0xF0, 0x3F, 0xF0, 0xC3, 0x86, 0x38, 0x7E, 0x1F, 0x1E, 0x7C, 0x07, 0x8E, 0x0F, 0xE0, 0xFC, 0x0F, 0x0F, 0x81, 0xC7, 0xC3, 0xF1, 0xC3, 0xC7, 0xE1, 0x8F, 0x03, 0xCE, 0x07, 0xCE, 0x3C, 0x78, 0x3E, 0x3C, 0x71, 0xC7, 0x1E, 0x38, 0x7F, 0x0F, 0x3C, 0x03, 0xE0, 0xF8, 0x1E, 0x70, 0x3E, 0x1E, 0x0F, 0xC3, 0xC3, 0x83, 0xC1, 0xCF, 0x0F, 0xC0, 0xFB, 0x0C, 0x78, 0x1F, 0x08, 0xEF, 0x83, 0xC0, 0xFF, 0x07, 0xC0, 0xF0, 0xF8, 0x3E, 0x3C, 0x3F, 0x03, 0xE0, 0xF0, 0xF8, 0x78, 0xF3, 0x87, 0xE1, 0x8F, 0x38, 0x7E, 0x3F, 0xC3, 0x98, 0x78, 0x78, 0x1E, 0x0F, 0x00, 0x78, 0x60, 0x7C, 0x31, 0xCC, 0x3F, 0x9E, 0x07, 0xC6, 0x03, 0xE0, 0xFF, 0x03, 0xFC, 0x70, 0xF8, 0x78, 0xFC, 0x1F, 0xE3, 0x00, 0xFC, 0x3F, 0x00, 0x78, 0x83, 0xF0, 0x3F, 0xE1, 0x07, 0x8E, 0x38, 0xFF, 0x03, 0xE0, 0x07, 0xC3, 0x80, 0x1F, 0x87, 0xF8, 0xF8, 0x70, 0xE0, 0x3E, 0x1E, 0x1C, 0x78, 0x7C, 0x0F, 0x83, 0x3F, 0x81, 0xF0, 0x7C, 0x03, 0xF0, 0xFC, 0x78, 0x78, 0x1E, 0x31, 0xF0, 0xF1, 0xE0, 0xF8, 0x3F, 0xE0, 0x3C, 0x1F, 0x0F, 0x1F, 0x0F, 0x80, 0xC7, 0x80, 0xFC, 0x1E, 0x1F, 0x86, 0x0F, 0x87, 0xC3, 0x81, 0xE1, 0x87, 0xF8, 0x78, 0x31, 0xE0, 0x3C, 0xF0, 0x7C, 0x0E, 0x03, 0xFC, 0x1F, 0x87, 0xE1, 0xC7, 0x30, 0xF3, 0x1F, 0xE3, 0xC7, 0x8F, 0x00, 0xF8, 0x07, 0xCE, 0x3C, 0x7F, 0x1C, 0xF1, 0xE0, 0xFE, 0x3C, 0x3C, 0x43, 0xC1, 0xE0, 0xF0, 0xE3, 0x0F, 0xC0, 0xFE, 0x30, 0xE3, 0x81, 0xC3, 0xC1, 0xE0, 0x3C, 0x78, 0x78, 0x8E, 0x1C, 0x38, 0xFE, 0x1C, 0x7C, 0x78, 0x78, 0x7C, 0x03, 0xFF, 0x1F, 0x07, 0x80, 0x3B, 0xE0, 0xC7, 0xF0, 0x07, 0xCE, 0x03, 0xC3, 0x0F, 0x80, 0x0F, 0xC0, 0x70, 0x8E, 0x1F, 0xC1, 0x9F, 0x00, 0x3C, 0xE0, 0x1F, 0xF0, 0xFF, 0x03, 0xF8, 0xE1, 0x9E, 0x01, 0xF0, 0x83, 0xC7, 0x01, 0xEF, 0x07, 0x3E, 0x1C, 0x1F, 0xC2, 0x1F, 0xC3, 0x39, 0xEF, 0x1C, 0xF0, 0x1F, 0x83, 0x00, 0xF0, 0x5F, 0x0E, 0x01, 0xC3, 0xC0, 0x3F, 0x98, 0x3F, 0x83, 0x1F, 0x00, 0x39, 0xE0, 0x0F, 0x1E, 0x1F, 0x8E, 0x1C, 0x67, 0x80, 0xC7, 0x06, 0xF0, 0xF0, 0xC3, 0x98, 0x70, 0xE1, 0xC2, 0x1C, 0x71, 0xC1, 0x8F, 0x01, 0xF0, 0xF3, 0xF0, 0x7E, 0xF0, 0xF8, 0x1F, 0xC3, 0x83, 0xF0, 0x18, 0xF8, 0x3F, 0x03, 0x8F, 0x07, 0xE0, 0xE1, 0xE2, 0x01, 0xF1, 0xC1, 0xF3, 0x00, 0x7E, 0x00, 0xF8, 0x01, 0xF1, 0x81, 0xFF, 0x03, 0x07, 0x8F, 0x80, 0xE1, 0xE3, 0xE1, 0x83, 0xF8, 0x01, 0xF7, 0x81, 0xF1, 0x83, 0xFF, 0x0F, 0x0E, 0x1E, 0x38, 0x1F, 0x9C, 0x03, 0x8F, 0x80, 0x7C, 0xFC, 0xC0, 0x7E, 0x0F, 0x1C, 0x1E, 0x3F, 0x00, 0x30, 0x1F, 0x80, 0xF0, 0x01, 0xF0, 0x3C, 0x78, 0x20, 0x07, 0xE3, 0x83, 0x0F, 0x1E, 0x00, 0xCE, 0x60, 0x3E, 0x3E, 0x07, 0xC0, 0x03, 0xC0, 0xFC, 0x70, 0x01, 0xFC, 0x00, 0x7C, 0x00, 0x0F, 0x07, 0x0F, 0x00, 0x00, 
0xF1, 0x81, 0xE0, 0x1C, 0xE1, 0x00, 0x73, 0xE0, 0x01, 0xF0, 0x71, 0xE0, 0x3D, 0xF0, 0x61, 0xFC, 0x00, 0x3F, 0xE0, 0x00, 0x78, 0xF8, 0xF0, 0x3F, 0xC0, 0x0E, 0x60, 0x0F, 0x80, 0x00, 0x70, 0x00, 0x00, 0x78, 0xE0, 0x38, 0xF0, 0xFF, 0x06, 0x1F, 0x01, 0x0F, 0x07, 0x18, 0x73, 0x0C, 0x38, 0x3E, 0x0F, 0x01, 0xFC, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x03, 0x80, 0xE0, 0xE0, 0x1E, 0x70, 0x00, 0x20, 0xC2, 0x00, 0xFF, 0xC0, 0x03, 0xFE, 0x10, 0x04, 0xC3, 0x80, 0x00, 0xFF, 0x00, 0x0F, 0x19, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x00 ), "SH": ( 0x07, 0x81, 0xE0, 0x0E, 0x0F, 0x02, 0x1E, 0x3C, 0x1F, 0xF0, 0xC1, 0xF0, 0x71, 0xFE, 0x07, 0xC0, 0xF8, 0x1E, 0x1F, 0x00, 0x1F, 0x83, 0xDF, 0x07, 0x87, 0x00, 0xCF, 0x0F, 0x1C, 0x3E, 0x07, 0x03, 0xC4, 0x3E, 0x1E, 0x3C, 0x3F, 0x03, 0xE0, 0x0F, 0xE0, 0xF8, 0x7C, 0x1F, 0x03, 0xF0, 0x3F, 0x07, 0xFC, 0x0F, 0xFE, 0x07, 0xF0, 0x7C, 0x00, 0x7C, 0x1F, 0xC1, 0xF0, 0xF8, 0x1F, 0x83, 0xC1, 0xC3, 0xC0, 0xCF, 0xE0, 0xC3, 0xE0, 0xFE, 0x0E, 0x3C, 0x03, 0xC7, 0x80, 0xF0, 0x03, 0xC3, 0x80, 0x3E, 0x08, 0x7E, 0x0F, 0xC1, 0xC0, 0xFE, 0x0F, 0xF0, 0x3C, 0x78, 0x1F, 0x83, 0xFF, 0x03, 0xFE, 0x01, 0xF7, 0x03, 0xF0, 0xE3, 0xF0, 0x01, 0xF0, 0x7F, 0x00, 0xFE, 0x00, 0xFE, 0x01, 0xFE, 0x18, 0x3C, 0x70, 0xFC, 0x0E, 0x7C, 0x3F, 0x38, 0x3C, 0x7C, 0x03, 0xF0, 0x01, 0xF0, 0x03, 0xF0, 0x07, 0xF0, 0x3F, 0xF0, 0x3E, 0xE0, 0x0F, 0x80, 0xFF, 0x80, 0xFE, 0x07, 0x3C, 0x3C, 0x3E, 0x03, 0xE1, 0xCE, 0x1F, 0x03, 0xE0, 0xCF, 0x38, 0x3F, 0x83, 0x8F, 0x83, 0x0F, 0x80, 0x7E, 0x10, 0xF0, 0xF0, 0x71, 0xC3, 0xE0, 0x3E, 0x1C, 0x07, 0x87, 0x80, 0x1E, 0x1E, 0x38, 0xE0, 0xE1, 0xE0, 0xCF, 0x0F, 0xC1, 0xFF, 0x07, 0x81, 0xF8, 0x3F, 0x07, 0x07, 0xFE, 0x0F, 0xCE, 0x0F, 0x07, 0x30, 0xE1, 0xE0, 0xE1, 0xE1, 0x1F, 0x07, 0x07, 0x83, 0xFC, 0x3C, 0x3C, 0x38, 0x7C, 0x1F, 0x83, 0xC0, 0x00, 0xF8, 0x1F, 0x07, 0x07, 0x87, 0x1E, 0x07, 0xC0, 0x1E, 0x1F, 0x01, 0xFF, 0x80, 0xC1, 0xE0, 0xFE, 0x03, 0x3E, 0x60, 0x7F, 0x00, 0xE1, 0xF0, 0x7E, 0x0F, 0x1F, 0x07, 0x9E, 0x00, 0xFF, 0x0E, 0x1F, 0x00, 0xF8, 0x3C, 0x3F, 0x03, 0xF0, 0x1F, 0xC3, 0x83, 0xF8, 0x0F, 0xC0, 0x0F, 0xE0, 0x1F, 0x8F, 0x0F, 0x80, 0x70, 0xE0, 0xF8, 0x10, 0xF8, 0x11, 0xF8, 0x0F, 0xC0, 0x1E, 0x3C, 0x0F, 0x0F, 0x0F, 0x81, 0xF8, 0xC0, 0xFF, 0x80, 0x3E, 0x1E, 0x01, 0xF8, 0x11, 0xF0, 0x1F, 0x03, 0xC7, 0x0E, 0x0F, 0xF0, 0x7C, 0x40, 0xFE, 0x0F, 0x0F, 0x07, 0x1F, 0x0F, 0xC1, 0xE0, 0xF1, 0xC0, 0xFC, 0x07, 0xC6, 0x0C, 0x7C, 0x30, 0xFC, 0x03, 0xF0, 0x78, 0x7C, 0x1C, 0x7C, 0x1F, 0x00, 0x78, 0xF8, 0x1F, 0xF8, 0x61, 0xE1, 0xC0, 0x7E, 0x07, 0xC0, 0x03, 0xF0, 0x31, 0xF8, 0x3F, 0x80, 0xFF, 0x87, 0x87, 0xE0, 0x7C, 0x3C, 0x0F, 0xC0, 0xFC, 0x07, 0x8F, 0x03, 0xF0, 0x03, 0xFC, 0x0F, 0x83, 0x83, 0xE1, 0xE1, 0xF8, 0x3F, 0x1C, 0x01, 0xF0, 0x7E, 0x07, 0x0F, 0xC0, 0x7F, 0x01, 0xF8, 0x01, 0xF0, 0x78, 0xF0, 0x41, 0xF0, 0x70, 0xF0, 0x3C, 0x78, 0x38, 0xE3, 0x07, 0x1E, 0x1C, 0x1F, 0x01, 0xE1, 0x83, 0xF0, 0xC3, 0xE0, 0x7F, 0xE0, 0xF8, 0xF8, 0x07, 0xE0, 0xF0, 0xE1, 0xC3, 0xC1, 0xF3, 0xC0, 0xF8, 0x3F, 0x0F, 0x07, 0x83, 0xC3, 0xC0, 0xFF, 0x03, 0xE0, 0x07, 0xE0, 0x7F, 0x00, 0x3C, 0x78, 0x78, 0x3D, 0xE0, 0xF0, 0xF8, 0x0F, 0xF0, 0x07, 0xE0, 0x78, 0x7C, 0x1F, 0x80, 0x1F, 0x07, 0x0F, 0x80, 0x7F, 0x81, 0xE1, 0xE0, 0x0F, 0xC1, 0x07, 0xC0, 0xFF, 0x00, 0xF8, 0x3C, 0x03, 0xE0, 0xF8, 0x7C, 0x03, 0x8F, 0x07, 0x83, 0xF0, 0xFF, 0x0C, 0x70, 0x3F, 0x0F, 0x0F, 0x80, 0x78, 0xE1, 0xC0, 0xF0, 0x70, 0xFC, 0x1C, 0x1F, 0x1E, 0x38, 0xF8, 0x78, 0x7C, 0x3C, 0x1E, 0x70, 0xE1, 0xF0, 0xF3, 0x83, 0x8E, 0x38, 0x60, 0xF0, 0x79, 0xE0, 0xFC, 0x1E, 0x3C, 0x61, 0xF0, 0x73, 0xC0, 0x1E, 0x3C, 0x1F, 0x01, 0xF0, 0xF1, 0xC3, 0xC1, 0xF0, 0x00, 0x7F, 0xC1, 0xFF, 
0x80, 0x3C, 0x3C, 0x7F, 0x03, 0xE0, 0xF8, 0x78, 0x3F, 0x81, 0xE0, 0xF0, 0x7F, 0xC0, 0xFE, 0x00, 0xF0, 0xE0, 0x07, 0x81, 0xFC, 0x03, 0xE0, 0xF0, 0x7C, 0x0E, 0x3C, 0x3E, 0x07, 0xC0, 0x73, 0xC3, 0xFC, 0x0F, 0xF8, 0x78, 0x7C, 0x1F, 0x80, 0xF8, 0x07, 0x0F, 0x83, 0xF0, 0x78, 0x0F, 0xE0, 0xFC, 0x03, 0x8F, 0x07, 0x87, 0xC0, 0xF8, 0xE0, 0x1F, 0x07, 0xF0, 0xF8, 0x7E, 0x38, 0x3F, 0x00, 0x3F, 0x81, 0x9F, 0x81, 0xFE, 0x00, 0xFC, 0x03, 0xF0 ), "TZ": ( 0xFE, 0xEF, 0x79, 0xCF, 0x73, 0x9C, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xE1, 0xFF, 0x9E, 0xFF, 0xBF, 0xFF, 0xFB, 0xFF, 0xFF, 0xFF, 0x1C, 0xF3, 0x80, 0x0E, 0x30, 0x8E, 0x40, 0x64, 0x20, 0x87, 0x19, 0xFF, 0xF8, 0xFF, 0x7C, 0xEF, 0x9E, 0xF3, 0x9F, 0xFF, 0x3C, 0xE3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x0C, 0x7D, 0xF7, 0xEF, 0xF7, 0xFF, 0xDF, 0xEF, 0xF7, 0xFF, 0xC7, 0xFC, 0xC0, 0x1E, 0x00, 0x3E, 0x18, 0x80, 0x20, 0x03, 0x84, 0xE7, 0x3F, 0xBE, 0xE7, 0xFF, 0xE3, 0x7F, 0x9C, 0xFF, 0x39, 0xEE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x80, 0x3F, 0xFF, 0xFF, 0xF3, 0xFF, 0xDD, 0xFF, 0xBD, 0xFF, 0xE7, 0x3F, 0xE0, 0x03, 0x06, 0x1C, 0xC4, 0x61, 0x00, 0x18, 0xC8, 0x77, 0xF3, 0x1E, 0xF1, 0xFC, 0xE7, 0x73, 0xFC, 0xFE, 0xE3, 0x9E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x3E, 0x1F, 0xFF, 0xFF, 0xFE, 0xF3, 0xFF, 0xFF, 0xF7, 0x3F, 0xF9, 0xE7, 0xF8, 0x80, 0x82, 0x03, 0x73, 0x00, 0x00, 0x08, 0xFC, 0x61, 0xFF, 0x9E, 0xFC, 0xE7, 0xB8, 0xEF, 0xFE, 0xF3, 0xCF, 0xCF, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC7, 0xFF, 0x3F, 0xFF, 0xFF, 0xFF, 0xCF, 0x7F, 0xDF, 0x9C, 0xE7, 0xFC, 0x00, 0x70, 0x83, 0xE3, 0x10, 0x86, 0x00, 0x7E, 0x38, 0xFC, 0x7E, 0xF3, 0x9F, 0xCF, 0x1F, 0xBF, 0x1F, 0xFE, 0xFF, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x78, 0xFF, 0xF3, 0xDF, 0x8F, 0xFF, 0xF7, 0xFF, 0xBF, 0xEF, 0x73, 0xCF, 0x20, 0x06, 0x00, 0x7C, 0x20, 0x31, 0x80, 0xC7, 0x30, 0xC7, 0xEE, 0xE7, 0x3C, 0xFF, 0xF1, 0xFE, 0xF3, 0xFC, 0xFE, 0xFC, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0xC1, 0x8F, 0xFF, 0x7F, 0xFF, 0x9C, 0xFF, 0xBF, 0xFF, 0xFF, 0xF9, 0x99, 0xE0, 0xC0, 0x61, 0x80, 0xEE, 0x02, 0x38, 0x30, 0x7E, 0x33, 0xDC, 0xC7, 0xFF, 0x3F, 0x9C, 0xE7, 0x7F, 0x33, 0xC3, 0x8F, 0xE6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x07, 0x07, 0x1F, 0xBF, 0xFF, 0xFD, 0xF7, 0xDE, 0xFF, 0xFF, 0xFF, 0x79, 0xFC, 0xE0, 0x01, 0x81, 0x07, 0x00, 0x04, 0x00, 0x61, 0xF3, 0x79, 0x8F, 0x9C, 0xF0, 0xFD, 0xF6, 0xDC, 0xE6, 0xE7, 0x1F, 0x7E, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0xEC, 0xFF, 0xFB, 0xFF, 0xCF, 0xFF, 0x9C, 0xE7, 0xF9, 0xFF, 0xFC, 0xC1, 0x80, 0x03, 0x00, 0x1F, 0x00, 0x00, 0xC2, 0x03, 0xE3, 0x1F, 0xF1, 0xFF, 0xFC, 0xF3, 0x8E, 0x7F, 0x73, 0xB9, 0xCF, 0x3E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x70, 0xED, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0xEF, 0xFF, 0xE3, 0x8F, 0x8C, 0x80, 0x0E, 0x30, 0xF9, 0xC0, 0x00, 0x00, 0x0F, 0x63, 0x0F, 0x38, 0xDF, 0x1C, 0xFF, 0xF7, 0x3F, 0xCF, 0x7E, 0xF3, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x03, 0xE0, 0x1E, 0xF3, 0xFF, 0xFF, 0xFF, 0xF8, 0xFE, 0xF3, 0x9C, 0xEC, 0x7B, 0x8C, 0x02, 0x00, 0x07, 0x20, 0x03, 0x00, 0x00, 0xF1, 0x87, 0xF7, 0x3B, 0xBE, 0xFF, 0xF7, 0x3F, 0xFC, 0xCE, 0x7B, 0x9E, 0xFC, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x0E, 0x30, 0xFF, 0xF3, 0xFF, 0xCF, 0xFF, 0xFF, 0xFE, 0x7C, 0xF3, 0xB9, 0x79, 0x80, 0x07, 0x00, 0x33, 0x00, 0x71, 0x8C, 0x60, 0xC3, 0xCF, 0x73, 0xBC, 0xCE, 0x79, 0xFE, 0x3F, 0xF7, 0x1E, 0xFC, 0xE7, 0x20, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x61, 0x01, 0xBF, 0xDF, 0x7F, 0xFF, 0xFF, 0xFF, 0xCF, 0xFB, 0x9F, 0xEF, 0x77, 0x63, 0x00, 0x18, 0x01, 0xC7, 0x00, 0x30, 0x18, 0xDE, 0x73, 0x7D, 0x8F, 0x9E, 0xFE, 0xFF, 0x8F, 0x9F, 0x0F, 0xF3, 0x87, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x80, 0x03, 0x0E, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF ), "TH": ( 0xC7, 0x37, 0xB3, 0x9C, 0x73, 0x98, 0xE3, 0x86, 0x40, 0xC6, 0x80, 0x38, 0x30, 0x8E, 0x70, 0xE7, 0x1D, 0x87, 0x39, 0xBC, 0xC6, 0x71, 0xE3, 0x11, 0xC7, 0x1C, 0xF3, 0x01, 0xCE, 0x64, 0x31, 0xCE, 0x60, 0xE2, 0x31, 0xC6, 0x18, 0xE7, 0x87, 0x00, 0x80, 0x0C, 0x63, 0x1C, 0xE7, 0x79, 0xC3, 0xE0, 0xC0, 0x00, 0x0C, 0x70, 0xC3, 0xE7, 0x8E, 0x60, 0x0E, 0x78, 0x38, 0x70, 0xE1, 0x30, 0xC1, 0xC3, 0x39, 0x9C, 0x3E, 0x78, 0xF1, 0xCE, 0xC7, 0x1C, 0x1C, 0x0F, 0x1C, 0x7E, 0x78, 0xD1, 0xC7, 0x07, 0xCE, 0x3E, 0x39, 0x99, 0xF8, 0xE3, 0xB1, 0xCD, 0x8E, 0x39, 0xCE, 0x66, 0x18, 0xF8, 0xC7, 0x38, 0xCE, 0x18, 0xE3, 0x8C, 0xE1, 0xC3, 0x81, 0xC7, 0x18, 0xC3, 0x38, 0x1C, 0x71, 0xC0, 0xF0, 0xC7, 0x38, 0xCE, 0x38, 0x70, 0x0C, 0xC6, 0x33, 0x88, 0x0E, 0x70, 0x30, 0xC7, 0x67, 0x18, 0xC3, 0x39, 0x9C, 0xE3, 0x1C, 0xCE, 0x71, 0x88, 0x0C, 0x3C, 0x61, 0xF1, 0x8F, 0xFF, 0x8E, 0x70, 0x3C, 0x70, 0xC1, 0x81, 0x0E, 0x23, 0x1C, 0x4E, 0x70, 0x61, 0x81, 0x86, 0x03, 0x00, 0x00, 0x38, 0x03, 0xC3, 0x87, 0xC7, 0x87, 0x1E, 0x0E, 0x7C, 0xF1, 0xF1, 0x8E, 0x73, 0x9F, 0xCF, 0x1E, 0x3B, 0x9C, 0xF0, 0xC1, 0xF3, 0x1C, 0xE3, 0x1C, 0xCC, 0x78, 0x70, 0xF1, 0x8E, 0x0C, 0x87, 0x30, 0x18, 0xE1, 0x88, 0x7C, 0x33, 0xCE, 0x38, 0x38, 0x61, 0x83, 0x0E, 0x01, 0x98, 0x00, 0xCE, 0x31, 0xCC, 0x8F, 0x07, 0x0F, 0x38, 0x37, 0x1C, 0xE1, 0xC3, 0x78, 0xF1, 0x8C, 0x38, 0x70, 0xC1, 0xC3, 0x9C, 0x8E, 0x4D, 0x1C, 0x60, 0x38, 0xCE, 0x72, 0x1C, 0x70, 0x01, 0xC7, 0xC3, 0x03, 0xC7, 0x0F, 0x51, 0xF1, 0x98, 0xE3, 0xE0, 0xDC, 0x71, 0x8E, 0x70, 0x38, 0xC6, 0x30, 0xC7, 0x0C, 0x03, 0x82, 0x00, 0xC7, 0x1C, 0x39, 0x9F, 0x82, 0x01, 0x06, 0x70, 0x38, 0x00, 0x00, 0x01, 0xB9, 0xC7, 0xFC, 0xF3, 0x9E, 0x67, 0xFF, 0xFF, 0xBD, 0xFC, 0xE7, 0xC7, 0xCF, 0x79, 0x9E, 0x33, 0x38, 0xC4, 0x63, 0x0C, 0xE3, 0x1C, 0xC6, 0x7D, 0xE6, 0x0F, 0x70, 0x78, 0xE6, 0x6F, 0x38, 0xE3, 0x00, 0x02, 0x08, 0x73, 0x9C, 0xE7, 0xF3, 0x7F, 0x8F, 0x1C, 0x61, 0x80, 0xE1, 0x08, 0xE3, 0x1E, 0xFC, 0x33, 0x8E, 0x7E, 0x70, 0xE7, 0x10, 0x8C, 0x00, 0x0E, 0x3C, 0x62, 0x00, 0xC7, 0x01, 0xC6, 0x38, 0xCC, 0x71, 0x80, 0xF1, 0xC6, 0x10, 0x66, 0x38, 0xC7, 0x3C, 0xC6, 0x73, 0x18, 0x3C, 0x30, 0xF8, 0x63, 0x91, 0x30, 0xC7, 0x39, 0x8C, 0x67, 0x39, 0xCF, 0xE3, 0x99, 0x8E, 0x71, 0xC8, 0xE3, 0x18, 0xC3, 0x03, 0x00, 0x0E, 0x70, 0x02, 0x1C, 0x1C, 0xC3, 0xC3, 0x1C, 0x78, 0x71, 0xC1, 0xE7, 0x39, 0xC7, 0xF0, 0x78, 0x7C, 0x79, 0x19, 0xC6, 0x39, 0xC7, 0x1E, 0x23, 0x06, 0x33, 0x38, 0xF1, 0xCF, 0x71, 0x8C, 0x71, 0x38, 0x3C, 0xE7, 0x38, 0x07, 0x31, 0x84, 0x03, 0x8C, 0x18, 0x60, 0xE1, 0xC6, 0x7F, 0x87, 0x39, 0x8E, 0x06, 0x18, 0xE3, 0x81, 0xC7, 0x78, 0xF4, 0x78, 0xF0, 0xC8, 0x87, 0x1C, 0x0E, 0x38, 0x1C, 0x71, 0x18, 0xC3, 0x08, 0x03, 0x0F, 0x3F, 0xF9, 0xC7, 0x39, 0xC6, 0x73, 0x9C, 0xE4, 0x38, 0x7E, 0x38, 0x8E, 0x63, 0x1E, 0x06, 0x71, 0x86, 0xE3, 0x80, 0xC3, 0x11, 0x8C, 0x00, 0x0E, 0x07, 0x04, 0x06, 0x30, 0xC1, 0xE7, 0x3D, 0xE1, 0xC6, 0x77, 0x38, 0x38, 0x78, 0x30, 0xDC, 0x61, 0xCE, 0x33, 0x1C, 0xE2, 0x38, 0xEC, 0xE3, 0x1C, 0xE3, 0x30, 0xE0, 0xC3, 0x01, 0xC7, 0x38, 0x39, 0x99, 0x39, 0x87, 0xC1, 0xC7, 0x18, 0xC6, 0x63, 0x9C, 0xE7, 0x3E, 0x38, 0x71, 0xCE, 0x23, 0x0F, 0x1E, 0x79, 0x01, 0x99, 0xC1, 0xC7, 0x83, 0x8C, 0x78, 0xF0, 0xE0, 0x38, 0x78, 0x0E, 0x0F, 0x8E, 0x38, 0xC7, 0xC7, 0x0E, 0xE6, 0x33, 0x8E, 0xC0, 0xE3, 
0x1C, 0x70, 0x0E, 0x39, 0x8F, 0x0E, 0x1C, 0xC3, 0xE7, 0x81, 0xE3, 0x82, 0x30, 0x20, 0x7C, 0x78, 0x71, 0x63, 0x87, 0x0E, 0x63, 0x1E, 0x3C, 0x71, 0xF8, 0xE7, 0x38, 0xC2, 0xE3, 0x19, 0x8E, 0x71, 0x8C, 0xC7, 0x39, 0x3F, 0xC7, 0x39, 0x8E, 0x30, 0xCE, 0x70 ), "ZH": ( 0x00, 0xF8, 0x3F, 0xE1, 0xFF, 0xF7, 0x8F, 0xFF, 0xC1, 0xFC, 0x0F, 0x01, 0xE3, 0xC0, 0xFF, 0x01, 0xC1, 0xE6, 0x0F, 0xF0, 0x03, 0xF8, 0x3F, 0x00, 0x3F, 0x00, 0xFF, 0x80, 0x7F, 0xF0, 0x3F, 0xC0, 0x7F, 0x81, 0xFF, 0x06, 0x1F, 0x00, 0x7C, 0x00, 0x0E, 0x01, 0xE0, 0x03, 0xC7, 0xF8, 0x3F, 0xFE, 0x3F, 0xFC, 0x3F, 0x07, 0xDE, 0x1E, 0x06, 0x3C, 0x07, 0x83, 0x80, 0xFC, 0x00, 0xFF, 0x87, 0x1E, 0x3C, 0x7C, 0x7C, 0x3F, 0x07, 0x80, 0x7C, 0x38, 0x3C, 0x20, 0xF8, 0x3F, 0x00, 0x18, 0x07, 0xC0, 0x01, 0xC0, 0xF0, 0x03, 0xE0, 0x07, 0xE0, 0x3F, 0xC1, 0xFF, 0xE0, 0x7F, 0xE0, 0xFC, 0x7C, 0x1C, 0xC0, 0x1F, 0xC0, 0x3F, 0x80, 0x0C, 0x3E, 0x1E, 0x0F, 0xC0, 0xFC, 0x3F, 0xC0, 0x1E, 0x38, 0x03, 0xF0, 0x3F, 0xF0, 0x78, 0xF8, 0xE0, 0xFE, 0x07, 0x80, 0xF8, 0x00, 0x3C, 0x0F, 0x00, 0x07, 0x80, 0x1F, 0x88, 0x1F, 0xFE, 0x7F, 0xFC, 0x7F, 0xF0, 0x1F, 0x87, 0x03, 0xE0, 0xC0, 0xF0, 0xC3, 0xC0, 0x83, 0xE0, 0xFE, 0x07, 0xC7, 0x0F, 0xC0, 0x7C, 0x7C, 0x38, 0xFF, 0x03, 0xFE, 0x07, 0x80, 0x7C, 0x1F, 0x00, 0x1C, 0x00, 0xF0, 0x03, 0xC0, 0x00, 0x00, 0x40, 0x01, 0xFC, 0x0F, 0xC1, 0xFF, 0x0F, 0xFF, 0x07, 0xFE, 0x03, 0xF8, 0x0F, 0xF8, 0x03, 0xF0, 0x00, 0xFC, 0x07, 0x1E, 0x01, 0xE3, 0xC0, 0xFF, 0x00, 0xFF, 0xC0, 0xF8, 0x3C, 0x7C, 0x80, 0x1F, 0x07, 0x80, 0xF8, 0xF0, 0x03, 0xE0, 0x00, 0xF0, 0x03, 0x00, 0x07, 0x00, 0x0F, 0xF0, 0x7C, 0x7F, 0xF3, 0xFF, 0x1F, 0xE0, 0x3F, 0x87, 0x80, 0x3F, 0xC0, 0x0F, 0xC0, 0x71, 0xF8, 0x18, 0xF0, 0x1F, 0xE0, 0xFC, 0x70, 0xF0, 0x7C, 0x1C, 0xE7, 0x87, 0x1F, 0x07, 0x8F, 0x81, 0xFF, 0xE0, 0x78, 0x01, 0x80, 0x20, 0x00, 0x00, 0x03, 0xE0, 0x07, 0xFE, 0x3C, 0x0F, 0xFC, 0x7E, 0x3F, 0xE0, 0x1F, 0x80, 0xF8, 0x03, 0xF8, 0x01, 0xFF, 0x80, 0x7F, 0x07, 0x83, 0xF0, 0xF8, 0xFE, 0x1F, 0x00, 0x39, 0xF0, 0x0F, 0x80, 0x3F, 0x03, 0xE3, 0xC0, 0x7F, 0xE0, 0x0F, 0x80, 0xF0, 0x00, 0x86, 0x00, 0x00, 0x78, 0x00, 0xFC, 0x07, 0xF1, 0xFF, 0xC0, 0xFF, 0x0F, 0xFC, 0x3F, 0xC0, 0xF3, 0x83, 0xC0, 0xE1, 0xC0, 0x03, 0xE1, 0xC1, 0x8F, 0x83, 0xE0, 0xFC, 0x1F, 0xE3, 0x07, 0x87, 0x1E, 0x3E, 0x0C, 0x3E, 0x07, 0xE0, 0xE1, 0xC0, 0x0F, 0xC0, 0x3F, 0x00, 0x00, 0x00, 0xC0, 0x00, 0x03, 0xE0, 0x07, 0xFF, 0xE3, 0xFF, 0x0F, 0xCF, 0xC1, 0xF8, 0x07, 0xE0, 0x7F, 0xC0, 0xE3, 0xC0, 0x7F, 0x0C, 0x3F, 0x00, 0xFC, 0x0F, 0x8E, 0x1F, 0x3C, 0xC1, 0xFF, 0x03, 0xE7, 0xC0, 0xFF, 0x00, 0xFF, 0x00, 0x3F, 0x80, 0x3F, 0x80, 0x3F, 0x00, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFE, 0x1F, 0xFE, 0x70, 0xF8, 0x3F, 0x81, 0xF8, 0x70, 0x03, 0xFE, 0x07, 0xF0, 0x00, 0x7C, 0x38, 0x78, 0xF8, 0x3F, 0xE0, 0xF0, 0xF9, 0xC0, 0xFE, 0x07, 0xFF, 0x03, 0xF0, 0x1F, 0x83, 0x00, 0xFC, 0x00, 0xFE, 0x01, 0xC0, 0x07, 0xC0, 0x3F, 0x00, 0xFC, 0x3F, 0xE3, 0xFF, 0xFB, 0xF0, 0x7F, 0xC0, 0x0F, 0xE0, 0x00, 0x01, 0xFC, 0x07, 0xFF, 0xC0, 0x0F, 0xFC, 0x03, 0xF8, 0x0F, 0xF0, 0x07, 0xE0, 0xF7, 0xC1, 0xE1, 0xFF, 0x80, 0xFE, 0x01, 0xF0, 0x1F, 0xC0, 0x00, 0x00, 0x00, 0x3F, 0x80, 0x1F, 0xC0, 0x3F, 0x07, 0xFF, 0x1F, 0xBF, 0xC0, 0x3F, 0x8F, 0xE0, 0x07, 0xF0, 0x60, 0x7F, 0x00, 0xFF, 0xE0, 0x1F, 0x0F, 0x07, 0x0F, 0x9C, 0x3E, 0xF0, 0x7E, 0x03, 0xC3, 0xE0, 0x86, 0xFC, 0x07, 0xE0, 0xF8, 0x00, 0x7E, 0x00, 0x38, 0x00, 0x00, 0xC0, 0x00, 0x00, 0xFF, 0x01, 0xFF, 0xFE, 0x0F, 0xFF, 0x03, 0xFE, 0xF0, 0x7C, 0xFC, 0x1E, 0x00, 0x7E, 0x00, 0xFE, 0x00, 0x3F, 0x00, 0xF8, 0x7E, 0x3E, 0xF0, 0xF0, 0x70, 0xF3, 0x83, 0xC0, 0xF8, 0x78, 0x03, 0xF0, 0xE0, 0x07, 0xF0, 0x07, 0xF0, 0x00, 0xE0, 0x00, 0x00, 
0x01, 0xFF, 0x80, 0xFF, 0x81, 0xFF, 0xF0, 0xFF, 0xE7, 0xE0, 0x3F, 0xF0, 0x03, 0x8F, 0x01, 0xC1, 0xEF, 0x00, 0x7F, 0xF0, 0x3F, 0xC1, 0xFF, 0xC0, 0xFF, 0x00, 0x3F ) } ################################################################################ # # # text-to-phonemes translation # # # ################################################################################ def ensure_text_alphanumeric( text=None, characters_pass= \ " 0123456789abcdefghijklmnopqrstuvwxyABCDEFGHIJKLMNOPQRSTUVWXYZ" ): return "".join(filter(set(characters_pass).__contains__, text)).strip( "\n") def make_regex_fragment_from_rules_English_to_phonemes_special_symbols( rule_pattern=None ): regex = r"" for character in rule_pattern: regex += rules_English_to_phonemes_special_symbols.get( character, character ) return regex def make_rule_regex( rule_text=None ): character_string, left_context, right_context, phoneme = rule_text.split( "/") rule = r"" if left_context: # Use a non-capturing group to match the left context. rule += \ r"(?:" + \ make_regex_fragment_from_rules_English_to_phonemes_special_symbols( rule_pattern=left_context ) + \ ")" # Create a capturing group for the character string. rule += r"(?P<found>" + character_string + ")" if right_context: # Add a lookahead pattern. rule += \ r"(?=" + \ make_regex_fragment_from_rules_English_to_phonemes_special_symbols( rule_pattern=right_context ) + \ ")" # Return a tuple containing the regex created from the rule, a lower-case # representation of the phonemes between dashes and the original rule. return rule, "-{phoneme}-".format(phoneme=phoneme.lower()), rule_text def match_and_replace( text=None, rule=None, phoneme=None ): """ Replace found text from a single rule. """ # Find all rule matches. matches = [(match.start(), match.end()) for \ match in re.finditer(rule, text)] # Start from behind, so replace in-place. matches.reverse() # Convert to characters because strings are immutable. characters = list(text) for start, end in matches: characters[start:end] = phoneme # Convert back to string. return "".join(characters) rules_English_to_phonemes_regex = [ make_rule_regex(rule_text=rule) for rule in rules_English_to_phonemes ] def text_to_phonemes( text=None, explain=False, phonemes_dictionary=phonemes_dictionary ): """ Extract phonemes from words. """ if explain: print("\ntranslation printout:") print("text: {text}".format(text=text)) text = ensure_text_alphanumeric(text=text) # Add space around words for compatibility with rules containing spaces. result = " {text} ".format(text=text.upper()) step = 0 # Iterate over all the interesting tuples. for rule, phoneme, rule_text in rules_English_to_phonemes_regex: # For each rule, 'tmp' is the string in which all matches for 'rule' # have been replaced by 'phoneme'. 
tmp = match_and_replace( text=result, rule=rule, phoneme=phoneme ) if explain and tmp != result: step += 1 message = \ "step {step}: {result} ---> {tmp} [rule: {rule_text} ({rule})]" print(message.format( step=step, result=result, tmp=tmp, rule_text=rule_text, rule=rule )) result = tmp # remove artifacts result_artifacts_removed = result.replace( "- -", " " ).replace( "--", "-" ).strip( " " ).strip( "-" ).replace( "--", "-" ) if explain: print("result: {result}\n".format(result=result_artifacts_removed)) # make uppercase result_uppercase = result_artifacts_removed.upper() # remove junk acceptable_phonemes = [ key for key, value in phonemes_dictionary.iteritems() ] result_cleaning = [] for word in result_uppercase.split(" "): tmp_word = [] for word_phoneme in word.split("-"): if word_phoneme in acceptable_phonemes: tmp_word.append(word_phoneme) if word_phoneme == "I": tmp_word.append("AH-EE") if word_phoneme == "EEH": tmp_word.append("EH") result_cleaning.append("-".join(tmp_word)) result = " ".join(result_cleaning).strip() return result ################################################################################ # # # amplitude data and sound output # # # ################################################################################ def amplitude_data_to_binary_data( values, minimum=-1, maximum=1 ): values = datavision.normalize_to_range( values, minimum=-1, maximum=1 ) values_binary = "" for value in values: values_binary = values_binary + chr(int(value * 127 + 128)) return values_binary def play_values( values=None, bitrate=15300 ): binary_data = amplitude_data_to_binary_data(values) with propyte.silence(): stream = pyaudio.PyAudio().open( format=pyaudio.PyAudio().get_format_from_width(1), channels=1, rate=bitrate, output=True ) stream.write(binary_data) stream.stop_stream() stream.close() pyaudio.PyAudio().terminate() def save_values_to_wave_file( values=None, filename=None, maximum_amplitude=65535, # maximum value of unsigned short 16 bit number sample_rate=44100, # Hz number_of_channels=1, sample_width=2 # bytes per frame ): values = datavision.normalize_to_range( values, minimum=-(maximum_amplitude / 2), maximum=maximum_amplitude / 2 ) file_output = wave.open(filename, "w") file_output.setnchannels(number_of_channels) file_output.setsampwidth(sample_width) file_output.setframerate(sample_rate) for value in values: write_data = struct.pack("<h", value) file_output.writeframesraw(write_data) file_output.writeframes("") file_output.close() def phoneme_values( phoneme=None, length=575, dimensions=1 ): data = [int(str(i), 10) for i in phonemes_dictionary[phoneme]] if length == 575: if dimensions == 1: return data elif dimensions == 2: return (range(0, len(data)), data) else: if dimensions == 1: data = shijian.change_list_resolution( values=data, length=length ) return data elif dimensions == 2: data_x, data_y = shijian.change_list_resolution( values=data, length=length, dimensions=2 ) return (data_x, data_y) def phonemes_values( phonemes_string=None, phonemes_dictionary=phonemes_dictionary ): acceptable_phonemes = \ [key for key, value in phonemes_dictionary.iteritems()] values = [] for phoneme in phonemes_string.split("-"): if phoneme in acceptable_phonemes: values.extend(phoneme_values(phoneme=phoneme, length=2000)) return values def phonemes_words_values( phonemes_words=None, change_waveform_to_rectangle_waveform=True ): """ This function converts sentences in phoneme form to amplitude values. 
""" values = [] for phonemes_string in phonemes_words.split(" "): values.extend( phonemes_values( phonemes_string=phonemes_string ) ) values.extend( phoneme_values( phoneme="space", length=2000 ) ) values.extend( phoneme_values( phoneme="space", length=2000 ) ) if change_waveform_to_rectangle_waveform is True: values = shijian.change_waveform_to_rectangle_waveform( values=values ) return values def speak_phoneme( phoneme=None ): for value in phoneme_values( phoneme=phoneme, length=2000 ): print(value) def speak_phonemes( phonemes_string=None ): for phoneme in phonemes_string.split("-"): speak_phoneme(phoneme=phoneme) def speak_phonemes_words( phonemes_words=None ): for phonemes_string in phonemes_words.split(" "): speak_phonemes(phonemes_string) speak_phoneme( phoneme="space", length=2000 ) def say( text=None, phonemes=None, save_to_file=False, filename_output=None, explain=False, split_sentences=True, translate_numbers=True ): text = ensure_text_alphanumeric(text=text) if text is not None: if translate_numbers is True: if explain: print("\nreplace numbers in text with English text:") print("text with numbers:\n{text}".format(text=text)) text = shijian.replace_numbers_in_text_with_English_text( text=text ) if explain: print("text with numbers replaced:\n{text}".format(text=text)) if save_to_file is not True: if split_sentences is True: # Split text into sentences. text = text.split(".") for sentence in text: _phonemes = text_to_phonemes( text=sentence, explain=explain ) _data = phonemes_words_values( phonemes_words=_phonemes ) play_values( values=_data ) else: _phonemes = text_to_phonemes( text=text, explain=explain ) _data = phonemes_words_values( phonemes_words=_phonemes ) play_values( values=_data ) else: _phonemes = text_to_phonemes( text=text, explain=explain ) _data = phonemes_words_values( phonemes_words=_phonemes ) save_values_to_wave_file( values=_data, filename=filename_output, sample_rate=15300 ) elif text is None and phonemes is not None: if save_to_file is not True: _phonemes = phonemes _data = phonemes_words_values( phonemes_words=_phonemes ) play_values( values=_data ) else: _phonemes = phonemes _data = phonemes_words_values( phonemes_words=_phonemes ) save_values_to_wave_file( values=_data, filename=filename_output, sample_rate=15300 ) ################################################################################ # # # analysis # # # ################################################################################ def analysis( visual=True, sound=True ): if visual is True: print("\nanalysis visual") print("\nsave histograms phonemes\n") save_histograms_phonemes( phonemes_dictionary=phonemes_dictionary ) print("\nsave multigraph comparisons of phonemes resolutions\n") save_multigraph_comparison_resolutions_phonemes( phonemes_dictionary=phonemes_dictionary ) print( "\nsave graph comparisons of phonemes data and " "FFT synthesized phonemes data\n" ) save_graph_phoneme_data_versus_FFT_synthesized_data() if sound is True: print("\nanalysis sound") number_of_words = 50 print("\nspeak {number} most frequent Brown Corpus words\n".format( number=number_of_words )) speak_most_frequent_Brown_Corpus_words( number=number_of_words ) def save_graph_phoneme( phoneme=None, title=None, title_axis_x="time", title_axis_y="amplitude", filename=None, directory="images", overwrite=True, color="black", LaTeX=False, marker_size=1, aspect=0.3 ): if title is None: title = "amplitude: phoneme {phoneme}".format( phoneme=phoneme ) if filename is None: filename = 
"amplitude_phoneme_{phoneme}.png".format( phoneme=phoneme ) datavision.save_graph_matplotlib( values=phoneme_values( phoneme=phoneme, length=2000 ), title=title, title_axis_x=title_axis_x, title_axis_y=title_axis_y, filename=filename, directory=directory, overwrite=overwrite, color=color, LaTeX=LaTeX, markers=False, marker_size=marker_size, aspect=aspect, line=True, line_style="k-", font_size=4 ) def save_graph_phoneme_data_versus_FFT_synthesized_data( sample_rate=20000 ): phonemes_frequency_contributions = \ greatest_frequency_contributions_phonemes( save_plots=False ) for key, value in phonemes_frequency_contributions.iteritems(): message = "save graph comparison phoneme {phoneme} versus " + \ "FFT synthesized phoneme data" print(message.format( phoneme=key )) values_amplitude, values_time = \ datavision.generate_composite_sine_values( frequencies=phonemes_frequency_contributions[key], sample_rate=sample_rate, time=1 ) data = phoneme_values( phoneme=key, length=sample_rate ) datavision.save_multigraph_matplotlib( variables=[values_amplitude.tolist(), data], variables_names=["synthetic", "data"], title="synthetic_versus_data_" + key, label_x="time", label_y="amplitude", filename="synthetic_versus_data_" + key + ".png", directory="images", overwrite=True, LaTeX=False, marker_size=0.5, palette_name="palette1" ) def save_multigraph_comparison_resolution_phoneme( phoneme=None, length_1=575, length_2=2000 ): datavision.save_multigraph_2D_matplotlib( variables_x=[ phoneme_values( phoneme=phoneme, length=length_1, dimensions=2 )[0], phoneme_values( phoneme=phoneme, length=length_2, dimensions=2 )[0] ], variables_y=[ phoneme_values( phoneme=phoneme, length=length_1, dimensions=2 )[1], phoneme_values( phoneme=phoneme, length=length_2, dimensions=2 )[1] ], variables_names=[ "575", "2000" ], title="resolutions phoneme {phoneme}".format( phoneme=phoneme ), label_x="time", label_y="amplitude", filename="resolutions_phoneme_{phoneme}.png".format( phoneme=phoneme ), directory="images", markers=False, marker_size=2, line=True, line_style="-", aspect=0.3, palette_name="palette1" ) def save_multigraph_comparison_resolutions_phonemes( phonemes_dictionary=phonemes_dictionary, length_1=575, length_2=2000, ): for key, value in phonemes_dictionary.iteritems(): print("save phoneme {phoneme} resolutions comparison".format( phoneme=key )) save_multigraph_comparison_resolution_phoneme( phoneme=key, length_1=length_1, length_2=length_2 ) def save_histogram_phoneme( phoneme=None, directory="images" ): datavision.save_histogram_matplotlib( phoneme_values( phoneme=phoneme, length=2000 ), filename="histogram_phoneme_{phoneme}.png".format( phoneme=phoneme ), directory=directory, title="histogram: phoneme {phoneme}".format( phoneme=phoneme ), label_x="value", label_y="frequency", color_fill="#000000" ) def save_histograms_phonemes( phonemes_dictionary=phonemes_dictionary ): for key, value in phonemes_dictionary.iteritems(): print("save histogram phoneme {phoneme}".format( phoneme=key )) save_histogram_phoneme(key) def greatest_frequency_contributions_phonemes( phonemes_dictionary=phonemes_dictionary, print_table=True, save_plots=True, number_of_contributions=500, time=1, sample_rate=15300 ): acceptable_phonemes = \ [key for key, value in phonemes_dictionary.iteritems()] phonemes_frequency_contributions = {} table_contents = [[ "phoneme", "{number} greatest frequency contributions".format( number=number_of_contributions ) ]] for phoneme in acceptable_phonemes: values_amplitude = phoneme_values( phoneme=phoneme, length=2000 
) frequencies = datavision.greatest_frequency_contributions_FFT( values_amplitude=values_amplitude, time=time, sample_rate=sample_rate, number_of_contributions=number_of_contributions ) phonemes_frequency_contributions[phoneme] = frequencies table_contents.append([phoneme, str(frequencies)]) if save_plots is True: filename = "FFT_phoneme_{phoneme}.png".format(phoneme=phoneme) print("save file {filename}".format(filename=filename)) datavision.save_FFT_plot_matplotlib( values_amplitude=values_amplitude, time=time, sample_rate=sample_rate, filename=filename ) if print_table is True: print( pyprel.Table( contents=table_contents ) ) return phonemes_frequency_contributions def most_frequent_Brown_Corpus_words(): import nltk import nltk.corpus words = [] for word in nltk.corpus.brown.words(): if word not in [ ",", ".", "``", "''", ";", "?", "--", ")", "(", ":", "!" ]: words.append(word.lower()) frequencies_words = nltk.FreqDist(words).most_common() words_most_frequent = [word[0] for word in frequencies_words] return words_most_frequent def speak_most_frequent_Brown_Corpus_words( number=4000 ): _text = " ".join(most_frequent_Brown_Corpus_words()[:number]) print(_text) _phonemes = text_to_phonemes(text=_text) _data = phonemes_words_values( phonemes_words=_phonemes ) play_values( values=_data ) ################################################################################ # # # diagnostics # # # ################################################################################ # upcoming PortAudio checks if __name__ == "__main__": options = docopt.docopt(__doc__) if options["--version"]: print(version) exit() main(options)
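# Editor's note: the following is an illustrative, self-contained sketch of the
# rule-compilation idea used by make_rule_regex and match_and_replace above.
# The two-symbol special-symbol map and the single rule "EA/R/D/EH" are invented
# for the example; they are not taken from rules_English_to_phonemes. Unlike
# match_and_replace, this sketch replaces only the captured group, which keeps
# the demonstration simple.
import re

demo_special_symbols = {"#": "[AEIOUY]+", " ": " "}  # assumed meanings, for illustration only

def demo_expand(pattern):
    return "".join(demo_special_symbols.get(character, character) for character in pattern)

def demo_compile_rule(rule_text):
    # "characters/left context/right context/phoneme" -> (regex, "-phoneme-")
    characters, left, right, phoneme = rule_text.split("/")
    regex = ""
    if left:
        regex += "(?:" + demo_expand(left) + ")"   # consumed, non-capturing left context
    regex += "(?P<found>" + characters + ")"       # the text that gets replaced
    if right:
        regex += "(?=" + demo_expand(right) + ")"  # zero-width lookahead right context
    return regex, "-{phoneme}-".format(phoneme=phoneme.lower())

def demo_apply_rule(text, regex, phoneme):
    # replace matches from the end of the string so earlier indices stay valid
    characters = list(text)
    for match in reversed(list(re.finditer(regex, text))):
        characters[match.start("found"):match.end("found")] = phoneme
    return "".join(characters)

def demo_rule_translation():
    regex, phoneme = demo_compile_rule("EA/R/D/EH")
    print(demo_apply_rule(" READ ", regex, phoneme))  # prints " R-eh-D "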
gpl-3.0
charanpald/wallhack
wallhack/rankingexp/DatasetExp.py
1
1479
import logging
import sys
import numpy
import matplotlib
import powerlaw
matplotlib.use("GTK3Agg")
import matplotlib.pyplot as plt
from wallhack.rankingexp.DatasetUtils import DatasetUtils

"""
Do some basic analysis on the recommendation datasets.
"""

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

#X, U, V = DatasetUtils.syntheticDataset1()
#X = DatasetUtils.syntheticDataset2()
X = DatasetUtils.movieLens(quantile=100)
#X = DatasetUtils.flixster(quantile=100)
#X = DatasetUtils.mendeley(quantile=50)

print(X.shape)
m, n = X.shape

userCounts = X.sum(1)
itemCounts = X.sum(0)

results = powerlaw.Fit(itemCounts, discrete=True, xmax=n)
print(results.power_law.alpha)
print(results.power_law.xmin)
print(results.power_law.xmax)

u = 5
p = numpy.percentile(itemCounts, 100-u)
popItems = itemCounts > p
unpopItems = itemCounts <= p
print(popItems.sum(), unpopItems.sum())
print(itemCounts[popItems].sum())
print(itemCounts[unpopItems].sum())

plt.figure(0)
plt.hist(itemCounts, bins=50, log=True)
plt.xlabel("users")
plt.ylabel("log frequency")

sortedCounts = numpy.flipud(numpy.sort(itemCounts))/numpy.sum(itemCounts)
sortedCounts = numpy.cumsum(sortedCounts)

plt.figure(1)
plt.plot(sortedCounts)
plt.xlabel("users")
plt.ylabel("cum probability")

plt.figure(2)
plt.hist(itemCounts[unpopItems])
plt.xlabel("users")
plt.ylabel("frequency")

plt.figure(3)
plt.hist(userCounts, bins=50, log=True)
plt.xlabel("items")
plt.ylabel("log frequency")

plt.show()
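# Editor's note: a small self-contained sketch of the popularity split and the
# cumulative-probability curve computed above, run on synthetic Zipf-distributed
# counts so it needs neither the wallhack datasets nor matplotlib. The seed and
# distribution parameters are arbitrary choices for the illustration.
import numpy

numpy.random.seed(21)
demoCounts = numpy.random.zipf(2.0, size=1000).astype(numpy.float64)  # stand-in for itemCounts

u = 5  # treat the top u percent of items as "popular"
p = numpy.percentile(demoCounts, 100 - u)
popItems = demoCounts > p
unpopItems = demoCounts <= p
print(popItems.sum(), unpopItems.sum())
print(demoCounts[popItems].sum() / demoCounts.sum())  # share of interactions held by popular items

# cumulative probability curve, as plotted in figure(1) above
sortedCounts = numpy.flipud(numpy.sort(demoCounts)) / numpy.sum(demoCounts)
cumulativeProbability = numpy.cumsum(sortedCounts)
print(cumulativeProbability[:5])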
gpl-3.0
jakereps/q2-feature-table
q2_feature_table/tests/filter/test_filter_features.py
2
8201
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2021, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import unittest import qiime2 import numpy as np import pandas as pd from biom.table import Table from q2_feature_table import filter_features class FilterFeaturesTests(unittest.TestCase): """ These tests are minimal relative to FilterSamplesTests, since the two functions being tested using the same private function under the hood. These tests cover the two places where the axis parameter is passed, to ensure that the tests work on the 'observation' axis as well as the 'sample' axis. """ def test_min_frequency(self): # no filtering table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, min_frequency=2) expected = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # filter one table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, min_frequency=3) expected = Table(np.array([[1, 1, 2]]), ['O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # filter all table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, min_frequency=5) expected = Table(np.array([]), [], []) self.assertEqual(actual, expected) def test_filter_empty_samples(self): # no filtering table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, min_frequency=2, filter_empty_samples=False) expected = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # filter all table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, min_frequency=5, filter_empty_samples=False) expected = Table(np.empty((0, 3)), [], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) def test_feature_metadata(self): # no filtering df = pd.DataFrame({'SequencedGenome': ['yes', 'yes']}, index=pd.Index(['O1', 'O2'], name='id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, metadata=metadata) expected = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # filter one df = pd.DataFrame({'SequencedGenome': ['yes']}, index=pd.Index(['O1'], name='id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, metadata=metadata) expected = Table(np.array([[1, 3]]), ['O1'], ['S2', 'S3']) self.assertEqual(actual, expected) # filter all df = pd.DataFrame({}, index=pd.Index(['foo'], name='id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, metadata=metadata) expected = Table(np.array([]), [], []) self.assertEqual(actual, expected) # exclude one df = pd.DataFrame({'SequencedGenome': ['yes']}, index=pd.Index(['O1'], name='id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, metadata=metadata, 
exclude_ids=True) expected = Table(np.array([[1, 1, 2]]), ['O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # exclude all df = pd.DataFrame({'SequencedGenome': ['yes', 'yes']}, index=pd.Index(['O1', 'O2'], name='id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) actual = filter_features(table, metadata=metadata, exclude_ids=True) expected = Table(np.array([]), [], []) self.assertEqual(actual, expected) def test_where(self): # no filtering df = pd.DataFrame({'SequencedGenome': ['yes', 'no']}, index=pd.Index(['O1', 'O2'], name='feature-id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) where = "SequencedGenome='yes' OR SequencedGenome='no'" actual = filter_features(table, metadata=metadata, where=where) expected = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) # filter one df = pd.DataFrame({'SequencedGenome': ['yes', 'no']}, index=pd.Index(['O1', 'O2'], name='feature-id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) where = "SequencedGenome='yes'" actual = filter_features(table, metadata=metadata, where=where) expected = Table(np.array([[1, 3]]), ['O1'], ['S2', 'S3']) self.assertEqual(actual, expected) # filter all df = pd.DataFrame({'SequencedGenome': ['yes', 'no']}, index=pd.Index(['O1', 'O2'], name='feature-id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) where = "SequencedGenome='yes' AND SequencedGenome='no'" actual = filter_features(table, metadata=metadata, where=where) expected = Table(np.array([]), [], []) self.assertEqual(actual, expected) # filter one -> exclude one df = pd.DataFrame({'SequencedGenome': ['yes', 'no']}, index=pd.Index(['O1', 'O2'], name='feature-id')) metadata = qiime2.Metadata(df) table = Table(np.array([[0, 1, 3], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3']) where = "SequencedGenome='yes'" actual = filter_features(table, exclude_ids=True, metadata=metadata, where=where) expected = Table(np.array([[1, 1, 2]]), ['O2'], ['S1', 'S2', 'S3']) self.assertEqual(actual, expected) if __name__ == "__main__": unittest.main()
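# Editor's note: a hedged sketch of what the 'observation'-axis behaviour tested
# above looks like when done directly with a biom Table. The Table constructor is
# the same one used in the tests; the filter/ids/sum call signatures are assumed
# from the biom-format API rather than taken from this repository.
import numpy as np
from biom.table import Table

demo_table = Table(np.array([[0, 1, 1], [1, 1, 2]]), ['O1', 'O2'], ['S1', 'S2', 'S3'])

# keep features (observations) whose total frequency is at least 3
frequencies = dict(zip(demo_table.ids(axis='observation'),
                       demo_table.sum(axis='observation')))
ids_to_keep = [i for i, f in frequencies.items() if f >= 3]
filtered = demo_table.filter(ids_to_keep, axis='observation', inplace=False)
print(list(filtered.ids(axis='observation')))  # expected: ['O2']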
bsd-3-clause
cauchycui/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
57
13752
import numpy as np import scipy.sparse as sp from scipy import linalg from numpy.testing import assert_array_almost_equal, assert_array_equal from sklearn.datasets import make_classification from sklearn.utils.sparsefuncs import (mean_variance_axis, inplace_column_scale, inplace_row_scale, inplace_swap_row, inplace_swap_column, min_max_axis, count_nonzero, csc_median_axis_0) from sklearn.utils.sparsefuncs_fast import assign_rows_csr from sklearn.utils.testing import assert_raises def test_mean_variance_axis0(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 X_csr = sp.csr_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csr, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=0) X = X.astype(np.float32) X_csr = X_csr.astype(np.float32) X_csc = X_csr.astype(np.float32) X_means, X_vars = mean_variance_axis(X_csr, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) X_means, X_vars = mean_variance_axis(X_csc, axis=0) assert_array_almost_equal(X_means, np.mean(X, axis=0)) assert_array_almost_equal(X_vars, np.var(X, axis=0)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=0) def test_mean_variance_illegal_axis(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_csr = sp.csr_matrix(X) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3) assert_raises(ValueError, mean_variance_axis, X_csr, axis=2) assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1) def test_mean_variance_axis1(): X, _ = make_classification(5, 4, random_state=0) # Sparsify the array a little bit X[0, 0] = 0 X[2, 1] = 0 X[4, 3] = 0 X_lil = sp.lil_matrix(X) X_lil[1, 0] = 0 X[1, 0] = 0 X_csr = sp.csr_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csr, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) X_csc = sp.csc_matrix(X_lil) X_means, X_vars = mean_variance_axis(X_csc, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=1) X = X.astype(np.float32) X_csr = X_csr.astype(np.float32) X_csc = X_csr.astype(np.float32) X_means, X_vars = mean_variance_axis(X_csr, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) X_means, X_vars = mean_variance_axis(X_csc, axis=1) assert_array_almost_equal(X_means, np.mean(X, axis=1)) assert_array_almost_equal(X_vars, np.var(X, axis=1)) assert_raises(TypeError, mean_variance_axis, X_lil, axis=1) def test_densify_rows(): X = sp.csr_matrix([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) rows = np.array([0, 2, 3], dtype=np.intp) out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64) assign_rows_csr(X, rows, np.arange(out.shape[0], dtype=np.intp)[::-1], out) assert_array_equal(out, X[rows].toarray()[::-1]) def test_inplace_column_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = 
X.toarray() scale = rng.rand(200) XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale inplace_column_scale(Xc, scale) inplace_column_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_row_scale(): rng = np.random.RandomState(0) X = sp.rand(100, 200, 0.05) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() scale = rng.rand(100) XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) X = X.astype(np.float32) scale = scale.astype(np.float32) Xr = X.tocsr() Xc = X.tocsc() XA = X.toarray() XA *= scale.reshape(-1, 1) inplace_row_scale(Xc, scale) inplace_row_scale(Xr, scale) assert_array_almost_equal(Xr.toarray(), Xc.toarray()) assert_array_almost_equal(XA, Xc.toarray()) assert_array_almost_equal(XA, Xr.toarray()) assert_raises(TypeError, inplace_column_scale, X.tolil(), scale) def test_inplace_swap_row(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[0], X[-1] = swap(X[0], X[-1]) inplace_swap_row(X_csr, 0, -1) inplace_swap_row(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[2], X[3] = swap(X[2], X[3]) inplace_swap_row(X_csr, 2, 3) inplace_swap_row(X_csc, 2, 3) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_row, X_csr.tolil()) def test_inplace_swap_column(): X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) 
inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) X = np.array([[0, 3, 0], [2, 4, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) swap = linalg.get_blas_funcs(('swap',), (X,)) swap = swap[0] X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1]) inplace_swap_column(X_csr, 0, -1) inplace_swap_column(X_csc, 0, -1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1]) inplace_swap_column(X_csr, 0, 1) inplace_swap_column(X_csc, 0, 1) assert_array_equal(X_csr.toarray(), X_csc.toarray()) assert_array_equal(X, X_csc.toarray()) assert_array_equal(X, X_csr.toarray()) assert_raises(TypeError, inplace_swap_column, X_csr.tolil()) def test_min_max_axis0(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=0) assert_array_equal(mins_csr, X.min(axis=0)) assert_array_equal(maxs_csr, X.max(axis=0)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=0) assert_array_equal(mins_csc, X.min(axis=0)) assert_array_equal(maxs_csc, X.max(axis=0)) def test_min_max_axis1(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) X = X.astype(np.float32) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) mins_csr, maxs_csr = min_max_axis(X_csr, axis=1) assert_array_equal(mins_csr, X.min(axis=1)) assert_array_equal(maxs_csr, X.max(axis=1)) mins_csc, maxs_csc = min_max_axis(X_csc, axis=1) assert_array_equal(mins_csc, X.min(axis=1)) assert_array_equal(maxs_csc, X.max(axis=1)) def test_min_max_axis_errors(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0) assert_raises(ValueError, min_max_axis, X_csr, axis=2) assert_raises(ValueError, min_max_axis, X_csc, axis=-3) def test_count_nonzero(): X = np.array([[0, 3, 0], [2, -1, 0], [0, 0, 0], [9, 8, 7], [4, 0, 5]], dtype=np.float64) X_csr = sp.csr_matrix(X) X_csc = sp.csc_matrix(X) X_nonzero = X != 0 sample_weight = [.5, .2, .3, .1, .1] X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None] for axis in [0, 1, -1, -2, None]: assert_array_almost_equal(count_nonzero(X_csr, axis=axis), X_nonzero.sum(axis=axis)) assert_array_almost_equal(count_nonzero(X_csr, axis=axis, sample_weight=sample_weight), X_nonzero_weighted.sum(axis=axis)) assert_raises(TypeError, count_nonzero, X_csc) assert_raises(ValueError, count_nonzero, X_csr, axis=2) def test_csc_row_median(): # Test 
csc_row_median actually calculates the median. # Test that it gives the same output when X is dense. rng = np.random.RandomState(0) X = rng.rand(100, 50) dense_median = np.median(X, axis=0) csc = sp.csc_matrix(X) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test that it gives the same output when X is sparse X = rng.rand(51, 100) X[X < 0.7] = 0.0 ind = rng.randint(0, 50, 10) X[ind] = -X[ind] csc = sp.csc_matrix(X) dense_median = np.median(X, axis=0) sparse_median = csc_median_axis_0(csc) assert_array_equal(sparse_median, dense_median) # Test for toy data. X = [[0, -2], [-1, -1], [1, 0], [2, 1]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5])) X = [[0, -2], [-1, -5], [1, -3]] csc = sp.csc_matrix(X) assert_array_equal(csc_median_axis_0(csc), np.array([0., -3])) # Test that it raises an Error for non-csc matrices. assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
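# Editor's note: a brief usage sketch of the helpers exercised by the tests above,
# checking the sparse results against their dense numpy equivalents on a tiny matrix.
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis, min_max_axis

X_demo = np.array([[0., 3., 0.],
                   [2., 4., 0.],
                   [0., 0., 7.]])
X_demo_csr = sp.csr_matrix(X_demo)

means, variances = mean_variance_axis(X_demo_csr, axis=0)
print(np.allclose(means, X_demo.mean(axis=0)))        # True
print(np.allclose(variances, X_demo.var(axis=0)))     # True

mins, maxs = min_max_axis(X_demo_csr, axis=1)
print(np.allclose(mins, X_demo.min(axis=1)))          # True
print(np.allclose(maxs, X_demo.max(axis=1)))          # True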
bsd-3-clause
pratapvardhan/scikit-learn
sklearn/metrics/cluster/__init__.py
312
1322
""" The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for cluster analysis results. There are two forms of evaluation: - supervised, which uses a ground truth class values for each sample. - unsupervised, which does not and measures the 'quality' of the model itself. """ from .supervised import adjusted_mutual_info_score from .supervised import normalized_mutual_info_score from .supervised import adjusted_rand_score from .supervised import completeness_score from .supervised import contingency_matrix from .supervised import expected_mutual_information from .supervised import homogeneity_completeness_v_measure from .supervised import homogeneity_score from .supervised import mutual_info_score from .supervised import v_measure_score from .supervised import entropy from .unsupervised import silhouette_samples from .unsupervised import silhouette_score from .bicluster import consensus_score __all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score", "adjusted_rand_score", "completeness_score", "contingency_matrix", "expected_mutual_information", "homogeneity_completeness_v_measure", "homogeneity_score", "mutual_info_score", "v_measure_score", "entropy", "silhouette_samples", "silhouette_score", "consensus_score"]
bsd-3-clause
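# Editor's note: a short illustration of the two evaluation styles described in the
# sklearn.metrics.cluster docstring above -- a supervised metric that needs ground-truth
# labels and an unsupervised one that scores the clustering from the data alone.
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score, silhouette_score

X_demo = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
labels_true = [0, 0, 1, 1]
labels_pred = [1, 1, 0, 0]  # the same partition with permuted label names

print(adjusted_rand_score(labels_true, labels_pred))  # 1.0: supervised, invariant to label permutation
print(silhouette_score(X_demo, labels_pred))          # close to 1: unsupervised, uses only X and labels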
johnpfay/environ859
06_WebGIS/Scripts/01_GettingDataWithPandas.py
1
1032
#GettingDataWithPandas.py
#
# This script demonstrates how Pandas can read text data directly from
# a web server using its read_csv function. Here we demonstrate it with
# data from the USGS National Water Information System service.
#
# If you open your browser to the following URL, you will see the data we will use:
# http://waterdata.usgs.gov/nc/nwis/uv?cb_00060=on&format=rdb&period=21&site_no=02085070
#
# ...Or, you can [uncomment and] use the following snippet to tell Python to view the data for you...
##import webbrowser
##webbrowser.open('http://waterdata.usgs.gov/nc/nwis/uv?cb_00060=on&format=rdb&period=21&site_no=02085070')

# So, our first example of automating the process of getting data is to demonstrate
# how easy it is for pandas to read in and use these data.

# Import the pandas module
import pandas as pd

# Set the url as a variable
theURL = 'http://waterdata.usgs.gov/nc/nwis/uv?cb_00060=on&format=rdb&period=21&site_no=02085070'

# Read in the data as a pandas data frame
dfNWIS = pd.read_csv(theURL)
#
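# Editor's note: the same read pattern can be tried offline. The miniature text below
# only mimics the tab-delimited, '#'-commented layout that rdb-format NWIS responses
# typically use; it is invented for the illustration, not real USGS data. (Python 3's
# io.StringIO stands in for the URL.)
import io
import pandas as pd

demoRDB = (
    "# comment lines describe the site and the retrieval\n"
    "agency_cd\tsite_no\tdatetime\tdischarge_cfs\n"
    "USGS\t02085070\t2016-10-01 00:00\t132\n"
    "USGS\t02085070\t2016-10-01 00:15\t131\n"
)
dfDemo = pd.read_csv(io.StringIO(demoRDB), sep='\t', comment='#')
print(dfDemo.head())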
gpl-3.0
johnmgregoire/JCAPdatavis
echem_FCVSsurfacearea.py
1
3121
import numpy, scipy
import pylab  # used for all plotting below; imported explicitly in case the star imports do not provide it
from matplotlib.ticker import FuncFormatter
import matplotlib.colors as colors
from echem_plate_math import *
import time, pickle
from echem_plate_fcns import *

p='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe_3V_FCV_4835/Sample4825_x60_y65_A33B23C3D40_FCVS7.txt'
p2='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe_3V_FCV_4835/Sample4825_x60_y65_A33B23C3D40_FCVS8.txt'
#p2=''
vrange=(-.19, -.14)

d=readechemtxt(p)
if p2!='':
    d2=readechemtxt(p2)
    for k, v in d2.iteritems():
        d[k]=numpy.append(d[k], v)

vraw=d['Ewe(V)']
iraw=d['I(A)']
pylab.plot(d['Ewe(V)'], d['I(A)'])
#pylab.show()

# select the voltage window and locate the start index and length of each sweep segment
vrbool=(vraw>=vrange[0])&(vraw<=vrange[1])
testlen=4
vrboolmean=numpy.array([vrbool[i:i+testlen].mean(dtype='float32')>.5 for i in range(len(vrbool)-testlen//2)])
vrboolapproxinds=numpy.where(numpy.logical_not(vrboolmean[:-1])&(vrboolmean[1:]))[0]+testlen//2
vrboolnoisyinds=numpy.where(numpy.logical_not(vrbool[:-1])&(vrbool[1:]))[0]
vstartinds_seg=vrboolnoisyinds[numpy.array([numpy.argmin((vrboolnoisyinds-i)**2) for i in vrboolapproxinds])]
vlen_seg=[]
for i, j in zip(vstartinds_seg, numpy.concatenate([vstartinds_seg[1:], [-1]])):
    print len(vrboolmean), i, j, j-testlen
    vlen_seg+=[numpy.where(vrboolmean[i:j-testlen])[0][-1]+testlen//2]

pylab.figure()
segdl=[]
for vsi, vlen in zip(vstartinds_seg, vlen_seg):
    segd={}
    for k in ['Ewe(V)','I(A)', 't(s)']:
        segd[k]=d[k][vsi:vsi+vlen]
    v=segd['Ewe(V)']
    i=segd['I(A)']
    t=segd['t(s)']
    ans=scipy.polyfit(v, i, 1)
    segd['I_Efit']=scipy.polyval(ans, v)
    segd['I_Efitfitpars']=ans
    ans=scipy.polyfit(t, v, 1)
    segd['E_tfit']=scipy.polyval(ans, t)
    segd['E_tfitfitpars']=ans
    segdl+=[segd]
    pylab.plot(segd['Ewe(V)'], segd['I(A)'])
    pylab.plot(segd['Ewe(V)'], segd['I_Efit'])

dEdt=numpy.array([sd['E_tfitfitpars'][0] for sd in segdl])
dIdE=numpy.array([sd['I_Efitfitpars'][0] for sd in segdl])
C=numpy.array([numpy.trapz(sd['I_Efitfitpars'], x=sd['t(s)']) for sd in segdl])
inds=numpy.arange(0, len(segdl), 2)
dEdtmean=(numpy.abs(dEdt[inds])+numpy.abs(dEdt[inds+1]))/2.
dC=C[inds]-C[inds+1]
vtest=numpy.array(vrange).mean()
itestarr=numpy.array([scipy.polyval(sd['I_Efitfitpars'], vtest) for sd in segdl])
delI=itestarr[inds]-itestarr[inds+1]

#pylab.figure()
#pylab.plot(dEdtmean, dC*1.e6, 'o')
#pylab.ylabel('differentialcharge (microC)')
#pylab.xlabel('ave scan rate (V/s)')

pylab.figure()
dIdtplot=dIdE*dEdt*1.e6
pylab.plot(dIdtplot[inds], 'bo', label='fwd')
pylab.plot(numpy.abs(dIdtplot[inds+1]), 'go', label='rev')
pylab.ylabel('dI/dt (microA/s)')
pylab.xlabel('CV number')
pylab.legend(loc=2)

pylab.figure()
pylab.plot(dEdtmean, delI*1.e6, 'o')
pylab.ylabel('capacitive current ($\mu$A)')
pylab.xlabel('ave scan rate (V/s)')
CC_dEdtfitpars=scipy.polyfit(dEdtmean, delI, 1)
lims=numpy.array([0, dEdtmean.max()])
fitvals=scipy.polyval(CC_dEdtfitpars, lims)
pylab.plot(lims, fitvals*1.e6, 'r-')
pylab.title('%.2e $\mu$C/V +%.2e $\mu$A' %(CC_dEdtfitpars[0]*1.e6, CC_dEdtfitpars[1]*1.e6))
pylab.show()
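# Editor's note: a standalone numpy sketch of the final fit above -- capacitive current
# versus scan rate, whose slope estimates the double-layer capacitance. The scan rates,
# capacitance and offset are synthetic values chosen for the illustration; no echem data
# files are needed.
import numpy

scan_rates = numpy.array([0.01, 0.02, 0.05, 0.1, 0.2])  # V/s
C_true = 2.5e-5                                          # F (assumed)
i_offset = 1.0e-7                                        # A (assumed)
capacitive_current = C_true * scan_rates + i_offset      # I = C * dE/dt + offset

CC_fitpars = numpy.polyfit(scan_rates, capacitive_current, 1)
print('capacitance ~ %.2e F, offset ~ %.2e A' % (CC_fitpars[0], CC_fitpars[1]))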
bsd-3-clause
rbharath/pande-gas
vs_utils/utils/target_utils.py
1
13943
""" Utilities for parsing target files from different sources (e.g. PubChem BioAssay). """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "BSD 3-clause" from collections import OrderedDict import gzip import numpy as np import pandas as pd import warnings from rdkit_utils import serial from vs_utils.utils import read_pickle, SmilesGenerator class AssayDataParser(object): """ Parse assay data files. Parameters ---------- data_filename : str Data filename. map_filename : str Compound ID->SMILES map filename. primary_key : str Name of column containing compound IDs. id_prefix : str, optional Prefix to prepend to compound IDs for mapping compound IDs to SMILES. activity_key : str, optional Name of column containing compound activity assignments. Must be provided if column_indices is None. If both activity_key and column_indices are set, column_indices will be used. activity_value : str, optional Value of positive class in activity_key. For example, 'Active' when parsing PubChem BioAssay data. column_indices : list, optional Data column indices to include. Must be provided if activity_key is None. If both activity_key and column_indices are set, column_indices will be used. delimiter : str, optional (default '\t') Delimiter to use when parsing data file. """ def __init__(self, data_filename, map_filename, primary_key, id_prefix=None, activity_key=None, activity_value=None, column_indices=None, delimiter='\t'): self.data_filename = data_filename self.map_filename = map_filename self.primary_key = primary_key self.id_prefix = id_prefix if activity_key is None and column_indices is None: raise ValueError( 'One of activity_key or column_indices must be set.') if activity_key is not None and activity_value is None: raise ValueError( 'You must set activity_value when using activity_key.') self.activity_key = activity_key self.activity_value = activity_value if column_indices is not None: column_indices = np.asarray(column_indices, dtype=int) self.column_indices = column_indices self.delimiter = delimiter def get_targets(self): """ Parse data file and return targets and corresponding SMILES. Procedure --------- 1. Read data and get unique rows by compound ID. 2. Map compound IDs to SMILES. 3. Extract targets from data. """ data = self.read_data() id_map = read_pickle(self.map_filename) # get compound SMILES from map # indices are for data rows successfully mapped to SMILES smiles, indices = self.map_ids_to_smiles(data[self.primary_key], id_map) # get targets if self.column_indices is not None: targets = np.zeros((data.shape[0], len(self.column_indices)), dtype=float) for i, idx in enumerate(self.column_indices): targets[:, i] = data[data.columns[idx]] else: targets = np.asarray( data[self.activity_key] == self.activity_value) targets = targets[indices] # reduce targets to matched structures return smiles, targets def read_data(self, **kwargs): """ Read assay data file. Parameters ---------- kwargs : dict, optional Keyword arguments for pd.read_table. """ if self.data_filename.endswith('.gz'): with gzip.open(self.data_filename) as f: df = pd.read_table(f, sep=self.delimiter, **kwargs) else: df = pd.read_table(self.data_filename, sep=self.delimiter, **kwargs) df = df.drop_duplicates(self.primary_key) # remove duplicate IDs return df def map_ids_to_smiles(self, ids, id_map): """ Look up SMILES for compound IDs in a compound ID->SMILES map. Parameters ---------- ids : array_like List of compound IDs. id_map : dict Compound ID->SMILES map. 
""" smiles = [] indices = [] for i, this_id in enumerate(ids): if np.isnan(this_id): continue try: this_id = int(this_id) # CIDs are often read in as floats except ValueError: pass if self.id_prefix is not None: # no bare IDs allowed in maps this_id = '{}{}'.format(self.id_prefix, this_id) if this_id in id_map: smiles.append(id_map[this_id]) indices.append(i) return np.asarray(smiles), np.asarray(indices) def get_column_names(self): """ Get names of selected data columns. """ if self.column_indices is None: return names = [] for i in self.column_indices: names.append(self.read_data().columns[i]) return names class PcbaParser(AssayDataParser): """ Parse PubChem BioAssay (PCBA) target files. Parameters ---------- data_filename : str Data filename. map_filename : str Compound ID->SMILES map filename. primary_key : str, optional (default 'PUBCHEM_CID') Name of column containing compound IDs. id_prefix : str, optional (default 'CID') Prefix to prepend to compound IDs for mapping compound IDs to SMILES. activity_key : str, optional (default 'PUBCHEM_ACTIVITY_OUTCOME') Name of column containing compound activity assignments. Must be provided if column_indices is None. If both activity_key and column_indices are set, column_indices will be used. activity_value : str, optional (default 'Active') Value of positive class in activity_key. For example, 'Active' when parsing PubChem BioAssay data. column_indices : list, optional Data column indices to include. Must be provided if activity_key is None. If both activity_key and column_indices are set, column_indices will be used. delimiter : str, optional (default ',') Delimiter to use when parsing data file. """ def __init__(self, data_filename, map_filename, primary_key='PUBCHEM_CID', id_prefix='CID', activity_key='PUBCHEM_ACTIVITY_OUTCOME', activity_value='Active', column_indices=None, delimiter=','): super(PcbaParser, self).__init__( data_filename, map_filename, primary_key, id_prefix, activity_key, activity_value, column_indices, delimiter) class Nci60Parser(AssayDataParser): """ Parse NCI60 target file. Parameters ---------- data_filename : str Data filename. map_filename : str Compound ID->SMILES map filename. primary_key : str, optional (default 'NSC') Name of column containing compound IDs. id_prefix : str, optional (default 'NSC') Prefix to prepend to compound IDs for mapping compound IDs to SMILES. activity_key : str, optional Name of column containing compound activity assignments. Must be provided if column_indices is None. If both activity_key and column_indices are set, column_indices will be used. activity_value : str, optional Value of positive class in activity_key. For example, 'Active' when parsing PubChem BioAssay data. column_indices : list, optional (default range(4, 64)) Data column indices to include. Must be provided if activity_key is None. If both activity_key and column_indices are set, column_indices will be used. delimiter : str, optional (default '\t') Delimiter to use when parsing data file. """ def __init__(self, data_filename, map_filename, primary_key='NSC', id_prefix='NSC', activity_key=None, activity_value=None, column_indices=range(4, 64), delimiter='\t'): super(Nci60Parser, self).__init__( data_filename, map_filename, primary_key, id_prefix, activity_key, activity_value, column_indices, delimiter) def read_data(self, **kwargs): """ Read assay data file. Parameters ---------- kwargs : dict, optional Keyword arguments for pd.read_table. 
""" # treat '-' and 'na' values as NaNs return super(Nci60Parser, self).read_data(na_values=['-', 'na']) def split_targets(self): """ Split targets among different assays. """ df = self.read_data() names = df.columns[self.column_indices] smiles, targets = self.get_targets() split_targets = OrderedDict() for i, name in enumerate(names): keep = ~np.isnan(targets[:, i]) if not np.count_nonzero(keep): warnings.warn( 'Assay "{}" has no matching records.'.format(name)) continue split_targets[name] = {'smiles': smiles[keep], 'targets': targets[keep]} return split_targets class Tox21Parser(object): """ Parse Tox21 data files. Parameters ---------- filename : str Data filename. merge_strategy : str, optional (default 'max') Strategy to use when merging targets for duplicated molecules. Choose from 'max' (active if active in any assay), 'min' (inactive if inactive in any assay), 'majority_pos' (majority vote with ties assigned active), or 'majority_neg' (majority vote with ties assigned inactive). """ dataset_names = ['NR-AR', 'NR-AhR', 'NR-AR-LBD', 'NR-ER', 'NR-ER-LBD', 'NR-Aromatase', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53'] def __init__(self, filename, merge_strategy='max'): self.filename = filename assert merge_strategy in ['max', 'min', 'majority_pos', 'majority_neg'] self.merge_strategy = merge_strategy def read_data(self): """ Read labeled molecules. """ with serial.MolReader().open(self.filename) as reader: mols = list(reader) return mols def read_targets(self): """ Get labels for molecules from SD data fields matching dataset names. Returns ------- data : dict Nested dictionary containing SMILES and targets for compounds in each dataset. Keyed by data->dataset->SMILES->target, where target is a list. """ engine = SmilesGenerator() data = {dataset: {} for dataset in self.dataset_names} skipped = [] for mol in self.read_data(): smiles = engine.get_smiles(mol) for prop in list(mol.GetPropNames()): if prop in data: score = int(mol.GetProp(prop)) if smiles not in data[prop]: data[prop][smiles] = [] data[prop][smiles].append(score) else: # skip irrelevant SD fields if prop not in skipped: skipped.append(prop) continue print 'Skipped properties:\n{}'.format('\n'.join(skipped)) return data def merge_targets(self, data): """ Merge labels for duplicate molecules according to a specified merge stratecy ('max', 'min', 'majority_pos', 'majority_neg'). Parameters ---------- data : dict Nested dictionary containing SMILES and targets for compounds in each dataset. Keyed by data->dataset->SMILES->target, where target is a list. Returns ------- data : dict Nested dictionary containing SMILES and targets for compounds in each dataset. Keyed by data->dataset->SMILES->target, where target is an integer. """ for dataset in self.dataset_names: for smiles, targets in data[dataset].items(): targets = np.asarray(targets, dtype=int) if self.merge_strategy == 'max': data[dataset][smiles] = max(targets) elif self.merge_strategy == 'min': data[dataset][smiles] = min(targets) # 0.5 rounds down elif self.merge_strategy == 'majority_neg': data[dataset][smiles] = int(np.round(np.mean(targets))) # 0.5 rounds up elif self.merge_strategy == 'majority_pos': data[dataset][smiles] = (int(np.round( np.mean(targets) + 1)) - 1) return data def get_targets(self): """ Get SMILES and targets for each Tox21 dataset. 
""" split_targets = {} data = self.merge_targets(self.read_targets()) for dataset in data: if not len(data[dataset]): warnings.warn('Dataset "{}" is empty'.format(dataset)) continue smiles, targets = [], [] for this_smiles, target in data[dataset].items(): smiles.append(this_smiles) targets.append(target) split_targets[dataset] = {'smiles': np.asarray(smiles), 'targets': np.asarray( targets, dtype=int)} return split_targets
bsd-3-clause
vybstat/scikit-learn
examples/text/document_clustering.py
230
8356
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn can be used to cluster documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features instead of standard numpy arrays. Two feature extraction methods can be used in this example: - TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most frequent words to features indices and hence compute a word occurrence frequency (sparse) matrix. The word frequencies are then reweighted using the Inverse Document Frequency (IDF) vector collected feature-wise over the corpus. - HashingVectorizer hashes word occurrences to a fixed dimensional space, possibly with collisions. The word count vectors are then normalized to each have l2-norm equal to one (projected to the euclidean unit-ball) which seems to be important for k-means to work in high dimensional space. HashingVectorizer does not provide IDF weighting as this is a stateless model (the fit method does nothing). When IDF weighting is needed it can be added by pipelining its output to a TfidfTransformer instance. Two algorithms are demoed: ordinary k-means and its more scalable cousin minibatch k-means. Additionally, latent sematic analysis can also be used to reduce dimensionality and discover latent patterns in the data. It can be noted that k-means (and minibatch k-means) are very sensitive to feature scaling and that in this case the IDF weighting helps improve the quality of the clustering by quite a lot as measured against the "ground truth" provided by the class label assignments of the 20 newsgroups dataset. This improvement is not visible in the Silhouette Coefficient which is small for both as this measure seem to suffer from the phenomenon called "Concentration of Measure" or "Curse of Dimensionality" for high dimensional datasets such as text data. Other measures such as V-measure and Adjusted Rand Index are information theoretic based evaluation scores: as they are only based on cluster assignments rather than distances, hence not affected by the curse of dimensionality. Note: as k-means is optimizing a non-convex objective function, it will likely end up in a local optimum. Several runs with independent random init might be necessary to get a good convergence. 
""" # Author: Peter Prettenhofer <[email protected]> # Lars Buitinck <[email protected]> # License: BSD 3 clause from __future__ import print_function from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer from sklearn import metrics from sklearn.cluster import KMeans, MiniBatchKMeans import logging from optparse import OptionParser import sys from time import time import numpy as np # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--lsa", dest="n_components", type="int", help="Preprocess documents with latent semantic analysis.") op.add_option("--no-minibatch", action="store_false", dest="minibatch", default=True, help="Use ordinary k-means algorithm (in batch mode).") op.add_option("--no-idf", action="store_false", dest="use_idf", default=True, help="Disable Inverse Document Frequency feature weighting.") op.add_option("--use-hashing", action="store_true", default=False, help="Use a hashing feature vectorizer") op.add_option("--n-features", type=int, default=10000, help="Maximum number of features (dimensions)" " to extract from text.") op.add_option("--verbose", action="store_true", dest="verbose", default=False, help="Print progress reports inside k-means algorithm.") print(__doc__) op.print_help() (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) dataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=42) print("%d documents" % len(dataset.data)) print("%d categories" % len(dataset.target_names)) print() labels = dataset.target true_k = np.unique(labels).shape[0] print("Extracting features from the training dataset using a sparse vectorizer") t0 = time() if opts.use_hashing: if opts.use_idf: # Perform an IDF normalization on the output of HashingVectorizer hasher = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=True, norm=None, binary=False) vectorizer = make_pipeline(hasher, TfidfTransformer()) else: vectorizer = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=False, norm='l2', binary=False) else: vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features, min_df=2, stop_words='english', use_idf=opts.use_idf) X = vectorizer.fit_transform(dataset.data) print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X.shape) print() if opts.n_components: print("Performing dimensionality reduction using LSA") t0 = time() # Vectorizer results are normalized, which makes KMeans behave as # spherical k-means for better results. Since LSA/SVD results are # not normalized, we have to redo the normalization. 
svd = TruncatedSVD(opts.n_components) normalizer = Normalizer(copy=False) lsa = make_pipeline(svd, normalizer) X = lsa.fit_transform(X) print("done in %fs" % (time() - t0)) explained_variance = svd.explained_variance_ratio_.sum() print("Explained variance of the SVD step: {}%".format( int(explained_variance * 100))) print() ############################################################################### # Do the actual clustering if opts.minibatch: km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=opts.verbose) else: km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=opts.verbose) print("Clustering sparse data with %s" % km) t0 = time() km.fit(X) print("done in %0.3fs" % (time() - t0)) print() print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, km.labels_, sample_size=1000)) print() if not opts.use_hashing: print("Top terms per cluster:") if opts.n_components: original_space_centroids = svd.inverse_transform(km.cluster_centers_) order_centroids = original_space_centroids.argsort()[:, ::-1] else: order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(true_k): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print()
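

###############################################################################
# Sketch (not part of the original example): assigning previously unseen
# documents to the clusters learned above. The two text snippets are made-up
# placeholders.
new_docs = ["NASA plans a new mission to study the sun",
            "Rendering 3D graphics on the GPU"]
X_new = vectorizer.transform(new_docs)
if opts.n_components:
    # reuse the fitted LSA pipeline so the new documents live in the same
    # reduced space as the training data
    X_new = lsa.transform(X_new)
print("Predicted clusters for new documents: %s" % km.predict(X_new))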
bsd-3-clause
NunoEdgarGub1/scikit-learn
benchmarks/bench_multilabel_metrics.py
86
7286
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': partial(f1_score, average='micro'), 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. """ metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, return_indicator=True, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, return_indicator=True, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in 
enumerate(formats): ax.plot(x_ticks, results[i, j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: {}'.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
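

def _demo_direct_call():
    """Sketch (not part of the original benchmark): time two metrics on dense
    and CSR targets by calling ``benchmark`` directly instead of through the
    command-line interface. The parameter values are arbitrary illustrations.
    """
    results = benchmark(metrics=[METRICS['f1'], METRICS['hamming']],
                        formats=[FORMATS['dense'], FORMATS['csr']],
                        samples=500, classes=4, density=.2, n_times=3)
    # one row per metric, one column per format
    _tabulate(results, ['f1', 'hamming'], ['dense', 'csr'])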
bsd-3-clause
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/pandas/io/pickle.py
15
1656
from pandas.compat import cPickle as pkl, pickle_compat as pc, PY3 def to_pickle(obj, path): """ Pickle (serialize) object to input file path Parameters ---------- obj : any object path : string File path """ with open(path, 'wb') as f: pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL) def read_pickle(path): """ Load pickled pandas object (or any other pickled object) from the specified file path Warning: Loading pickled data received from untrusted sources can be unsafe. See: http://docs.python.org/2.7/library/pickle.html Parameters ---------- path : string File path Returns ------- unpickled : type of object stored in file """ def try_read(path, encoding=None): # try with cPickle # try with current pickle, if we have a Type Error then # try with the compat pickle to handle subclass changes # pass encoding only if its not None as py2 doesn't handle # the param # cpickle # GH 6899 try: with open(path, 'rb') as fh: return pkl.load(fh) except (Exception) as e: # reg/patched pickle try: with open(path, 'rb') as fh: return pc.load(fh, encoding=encoding, compat=False) # compat pickle except: with open(path, 'rb') as fh: return pc.load(fh, encoding=encoding, compat=True) try: return try_read(path) except: if PY3: return try_read(path, encoding='latin1') raise
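

if __name__ == '__main__':
    # Minimal round-trip sketch (not part of pandas itself); the pickle file
    # is written to a temporary directory created on the fly rather than to
    # an assumed path.
    import os
    import tempfile

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2, 3]})
    path = os.path.join(tempfile.mkdtemp(), 'demo.pkl')
    to_pickle(df, path)
    print(read_pickle(path).equals(df))  # expected: True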
gpl-2.0
bnaul/scikit-learn
sklearn/ensemble/_gb.py
2
68941
"""Gradient Boosted Regression Trees This module contains methods for fitting gradient boosted regression trees for both classification and regression. The module structure is the following: - The ``BaseGradientBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ in the concrete ``LossFunction`` used. - ``GradientBoostingClassifier`` implements gradient boosting for classification problems. - ``GradientBoostingRegressor`` implements gradient boosting for regression problems. """ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti, # Arnaud Joly, Jacob Schreiber # License: BSD 3 clause from abc import ABCMeta from abc import abstractmethod import warnings from ._base import BaseEnsemble from ..base import ClassifierMixin from ..base import RegressorMixin from ..base import BaseEstimator from ..base import is_classifier from ._gradient_boosting import predict_stages from ._gradient_boosting import predict_stage from ._gradient_boosting import _random_sample_mask import numbers import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import issparse from time import time from ..model_selection import train_test_split from ..tree import DecisionTreeRegressor from ..tree._tree import DTYPE, DOUBLE from . import _gb_losses from ..utils import check_random_state from ..utils import check_array from ..utils import column_or_1d from ..utils.validation import check_is_fitted, _check_sample_weight from ..utils.multiclass import check_classification_targets from ..exceptions import NotFittedError from ..utils.validation import _deprecate_positional_args class VerboseReporter: """Reports verbose output to stdout. Parameters ---------- verbose : int Verbosity level. If ``verbose==1`` output is printed once in a while (when iteration mod verbose_mod is zero).; if larger than 1 then output is printed for each update. """ def __init__(self, verbose): self.verbose = verbose def init(self, est, begin_at_stage=0): """Initialize reporter Parameters ---------- est : Estimator The estimator begin_at_stage : int, default=0 stage at which to begin reporting """ # header fields and line format str header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}'] # do oob? if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr:>16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time:>16s}') # print the header line print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) # plot verbose info each time i % verbose_mod == 0 self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage def update(self, j, est): """Update reporter with new iteration. Parameters ---------- j : int The new iteration est : Estimator The estimator """ do_oob = est.subsample < 1 # we need to take into account if we fit additional estimators. 
i = j - self.begin_at_stage # iteration relative to the start iter if (i + 1) % self.verbose_mod == 0: oob_impr = est.oob_improvement_[j] if do_oob else 0 remaining_time = ((est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)) if remaining_time > 60: remaining_time = '{0:.2f}m'.format(remaining_time / 60.0) else: remaining_time = '{0:.2f}s'.format(remaining_time) print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time)) if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): # adjust verbose frequency (powers of 10) self.verbose_mod *= 10 class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta): """Abstract base class for Gradient Boosting. """ @abstractmethod def __init__(self, *, loss, learning_rate, n_estimators, criterion, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, min_impurity_decrease, min_impurity_split, init, subsample, max_features, ccp_alpha, random_state, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=1e-4): self.n_estimators = n_estimators self.learning_rate = learning_rate self.loss = loss self.criterion = criterion self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.subsample = subsample self.max_features = max_features self.max_depth = max_depth self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split self.ccp_alpha = ccp_alpha self.init = init self.random_state = random_state self.alpha = alpha self.verbose = verbose self.max_leaf_nodes = max_leaf_nodes self.warm_start = warm_start self.validation_fraction = validation_fraction self.n_iter_no_change = n_iter_no_change self.tol = tol def _fit_stage(self, i, X, y, raw_predictions, sample_weight, sample_mask, random_state, X_csc=None, X_csr=None): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == bool loss = self.loss_ original_y = y # Need to pass a copy of raw_predictions to negative_gradient() # because raw_predictions is partially updated at the end of the loop # in update_terminal_regions(), and gradients need to be evaluated at # iteration i - 1. raw_predictions_copy = raw_predictions.copy() for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, raw_predictions_copy, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=self.criterion, splitter='best', max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, min_impurity_decrease=self.min_impurity_decrease, min_impurity_split=self.min_impurity_split, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state, ccp_alpha=self.ccp_alpha) if self.subsample < 1.0: # no inplace multiplication! 
sample_weight = sample_weight * sample_mask.astype(np.float64) X = X_csr if X_csr is not None else X tree.fit(X, residual, sample_weight=sample_weight, check_input=False) # update tree leaves loss.update_terminal_regions( tree.tree_, X, y, residual, raw_predictions, sample_weight, sample_mask, learning_rate=self.learning_rate, k=k) # add tree to ensemble self.estimators_[i, k] = tree return raw_predictions def _check_params(self): """Check validity of parameters and raise ValueError if not valid. """ if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than 0 but " "was %r" % self.n_estimators) if self.learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0 but " "was %r" % self.learning_rate) if (self.loss not in self._SUPPORTED_LOSS or self.loss not in _gb_losses.LOSS_FUNCTIONS): raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) if self.loss == 'deviance': loss_class = (_gb_losses.MultinomialDeviance if len(self.classes_) > 2 else _gb_losses.BinomialDeviance) else: loss_class = _gb_losses.LOSS_FUNCTIONS[self.loss] if self.loss in ('huber', 'quantile'): self.loss_ = loss_class(self.n_classes_, self.alpha) else: self.loss_ = loss_class(self.n_classes_) if not (0.0 < self.subsample <= 1.0): raise ValueError("subsample must be in (0,1] but " "was %r" % self.subsample) if self.init is not None: # init must be an estimator or 'zero' if isinstance(self.init, BaseEstimator): self.loss_.check_init_estimator(self.init) elif not (isinstance(self.init, str) and self.init == 'zero'): raise ValueError( "The init parameter must be an estimator or 'zero'. " "Got init={}".format(self.init) ) if not (0.0 < self.alpha < 1.0): raise ValueError("alpha must be in (0.0, 1.0) but " "was %r" % self.alpha) if isinstance(self.max_features, str): if self.max_features == "auto": # if is_classification if self.n_classes_ > 1: max_features = max(1, int(np.sqrt(self.n_features_))) else: # is regression max_features = self.n_features_ elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: raise ValueError("Invalid value for max_features: %r. " "Allowed string values are 'auto', 'sqrt' " "or 'log2'." % self.max_features) elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, numbers.Integral): max_features = self.max_features else: # float if 0. < self.max_features <= 1.: max_features = max(int(self.max_features * self.n_features_), 1) else: raise ValueError("max_features must be in (0, n_features]") self.max_features_ = max_features if not isinstance(self.n_iter_no_change, (numbers.Integral, type(None))): raise ValueError("n_iter_no_change should either be None or an " "integer. %r was passed" % self.n_iter_no_change) def _init_state(self): """Initialize model state and allocate model state data structures. """ self.init_ = self.init if self.init_ is None: self.init_ = self.loss_.init_estimator() self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=object) self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) # do oob? if self.subsample < 1.0: self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) def _clear_state(self): """Clear the state of the gradient boosting model. 
""" if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ if hasattr(self, '_rng'): del self._rng def _resize_state(self): """Add additional ``n_estimators`` entries to all attributes. """ # self.n_estimators is the number of additional est to fit total_n_estimators = self.n_estimators if total_n_estimators < self.estimators_.shape[0]: raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0])) self.estimators_ = np.resize(self.estimators_, (total_n_estimators, self.loss_.K)) self.train_score_ = np.resize(self.train_score_, total_n_estimators) if (self.subsample < 1 or hasattr(self, 'oob_improvement_')): # if do oob resize arrays or create new if not available if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = np.resize(self.oob_improvement_, total_n_estimators) else: self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64) def _is_initialized(self): return len(getattr(self, 'estimators_', [])) > 0 def _check_initialized(self): """Check that the estimator is initialized, raising an error if not.""" check_is_fitted(self) def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. y : array-like of shape (n_samples,) Target values (strings or integers in classification, real numbers in regression) For classification, labels must correspond to classes. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, default=None The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object """ # if not warmstart - clear the estimator state if not self.warm_start: self._clear_state() # Check input # Since check_array converts both X and y to the same dtype, but the # trees use different types for X and y, checking them separately. 
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE, multi_output=True) n_samples, self.n_features_ = X.shape sample_weight_is_none = sample_weight is None sample_weight = _check_sample_weight(sample_weight, X) y = column_or_1d(y, warn=True) y = self._validate_y(y, sample_weight) if self.n_iter_no_change is not None: stratify = y if is_classifier(self) else None X, X_val, y, y_val, sample_weight, sample_weight_val = ( train_test_split(X, y, sample_weight, random_state=self.random_state, test_size=self.validation_fraction, stratify=stratify)) if is_classifier(self): if self.n_classes_ != np.unique(y).shape[0]: # We choose to error here. The problem is that the init # estimator would be trained on y, which has some missing # classes now, so its predictions would not have the # correct shape. raise ValueError( 'The training data after the early stopping split ' 'is missing some classes. Try using another random ' 'seed.' ) else: X_val = y_val = sample_weight_val = None self._check_params() if not self._is_initialized(): # init state self._init_state() # fit initial model and initialize raw predictions if self.init_ == 'zero': raw_predictions = np.zeros(shape=(X.shape[0], self.loss_.K), dtype=np.float64) else: # XXX clean this once we have a support_sample_weight tag if sample_weight_is_none: self.init_.fit(X, y) else: msg = ("The initial estimator {} does not support sample " "weights.".format(self.init_.__class__.__name__)) try: self.init_.fit(X, y, sample_weight=sample_weight) except TypeError: # regular estimator without SW support raise ValueError(msg) except ValueError as e: if "pass parameters to specific steps of "\ "your pipeline using the "\ "stepname__parameter" in str(e): # pipeline raise ValueError(msg) from e else: # regular estimator whose input checking failed raise raw_predictions = \ self.loss_.get_init_raw_predictions(X, self.init_) begin_at_stage = 0 # The rng state must be preserved if warm_start is True self._rng = check_random_state(self.random_state) else: # add more estimators to fitted model # invariant: warm_start = True if self.n_estimators < self.estimators_.shape[0]: raise ValueError('n_estimators=%d must be larger or equal to ' 'estimators_.shape[0]=%d when ' 'warm_start==True' % (self.n_estimators, self.estimators_.shape[0])) begin_at_stage = self.estimators_.shape[0] # The requirements of _decision_function (called in two lines # below) are more constrained than fit. It accepts only CSR # matrices. X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') raw_predictions = self._raw_predict(X) self._resize_state() # fit the boosting stages n_stages = self._fit_stages( X, y, raw_predictions, sample_weight, self._rng, X_val, y_val, sample_weight_val, begin_at_stage, monitor) # change shape of arrays after fit (early-stopping or additional ests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] self.n_estimators_ = n_stages return self def _fit_stages(self, X, y, raw_predictions, sample_weight, random_state, X_val, y_val, sample_weight_val, begin_at_stage=0, monitor=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. 
""" n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples, ), dtype=bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ if self.verbose: verbose_reporter = VerboseReporter(verbose=self.verbose) verbose_reporter.init(self, begin_at_stage) X_csc = csc_matrix(X) if issparse(X) else None X_csr = csr_matrix(X) if issparse(X) else None if self.n_iter_no_change is not None: loss_history = np.full(self.n_iter_no_change, np.inf) # We create a generator to get the predictions for X_val after # the addition of each successive stage y_val_pred_iter = self._staged_raw_predict(X_val) # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage old_oob_score = loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees raw_predictions = self._fit_stage( i, X, y, raw_predictions, sample_weight, sample_mask, random_state, X_csc, X_csr) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], raw_predictions[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = ( old_oob_score - loss_(y[~sample_mask], raw_predictions[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, raw_predictions, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break # We also provide an early stopping based on the score from # validation set (X_val, y_val), if n_iter_no_change is set if self.n_iter_no_change is not None: # By calling next(y_val_pred_iter), we get the predictions # for X_val after the addition of the current stage validation_loss = loss_(y_val, next(y_val_pred_iter), sample_weight_val) # Require validation_score to be better (less) than at least # one of the last n_iter_no_change evaluations if np.any(validation_loss + self.tol < loss_history): loss_history[i % len(loss_history)] = validation_loss else: break return i + 1 def _make_estimator(self, append=True): # we don't need _make_estimator raise NotImplementedError() def _raw_predict_init(self, X): """Check input and compute raw predictions of the init estimator.""" self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) if X.shape[1] != self.n_features_: raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format( self.n_features_, X.shape[1])) if self.init_ == 'zero': raw_predictions = np.zeros(shape=(X.shape[0], self.loss_.K), dtype=np.float64) else: raw_predictions = self.loss_.get_init_raw_predictions( X, self.init_).astype(np.float64) return raw_predictions def _raw_predict(self, X): """Return the sum of the trees raw predictions (+ init estimator).""" raw_predictions = self._raw_predict_init(X) predict_stages(self.estimators_, X, self.learning_rate, raw_predictions) return raw_predictions def _staged_raw_predict(self, X): """Compute raw predictions of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. 
Returns ------- raw_predictions : generator of ndarray of shape (n_samples, k) The raw predictions of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') raw_predictions = self._raw_predict_init(X) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions) yield raw_predictions.copy() @property def feature_importances_(self): """The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func:`sklearn.inspection.permutation_importance` as an alternative. Returns ------- feature_importances_ : array, shape (n_features,) The values of this array sum to 1, unless all trees are single node trees consisting of only the root node, in which case it will be an array of zeros. """ self._check_initialized() relevant_trees = [tree for stage in self.estimators_ for tree in stage if tree.tree_.node_count > 1] if not relevant_trees: # degenerate case where all trees have only one node return np.zeros(shape=self.n_features_, dtype=np.float64) relevant_feature_importances = [ tree.tree_.compute_feature_importances(normalize=False) for tree in relevant_trees ] avg_feature_importances = np.mean(relevant_feature_importances, axis=0, dtype=np.float64) return avg_feature_importances / np.sum(avg_feature_importances) def _compute_partial_dependence_recursion(self, grid, target_features): """Fast partial dependence computation. Parameters ---------- grid : ndarray of shape (n_samples, n_target_features) The grid points on which the partial dependence should be evaluated. target_features : ndarray of shape (n_target_features,) The set of target features for which the partial dependence should be evaluated. Returns ------- averaged_predictions : ndarray of shape \ (n_trees_per_iteration, n_samples) The value of the partial dependence function on each grid point. """ if self.init is not None: warnings.warn( 'Using recursion method with a non-constant init predictor ' 'will lead to incorrect partial dependence values. ' 'Got init=%s.' % self.init, UserWarning ) grid = np.asarray(grid, dtype=DTYPE, order='C') n_estimators, n_trees_per_stage = self.estimators_.shape averaged_predictions = np.zeros((n_trees_per_stage, grid.shape[0]), dtype=np.float64, order='C') for stage in range(n_estimators): for k in range(n_trees_per_stage): tree = self.estimators_[stage, k].tree_ tree.compute_partial_dependence(grid, target_features, averaged_predictions[k]) averaged_predictions *= self.learning_rate return averaged_predictions def _validate_y(self, y, sample_weight): # 'sample_weight' is not utilised but is used for # consistency with similar method _validate_y of GBC self.n_classes_ = 1 if y.dtype.kind == 'O': y = y.astype(DOUBLE) # Default implementation return y def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. 
If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array-like of shape (n_samples, n_estimators, n_classes) For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. In the case of binary classification n_classes is 1. """ self._check_initialized() X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True) # n_classes will be equal to 1 in the binary classification or the # regression case. n_estimators, n_classes = self.estimators_.shape leaves = np.zeros((X.shape[0], n_estimators, n_classes)) for i in range(n_estimators): for j in range(n_classes): estimator = self.estimators_[i, j] leaves[:, i, j] = estimator.apply(X, check_input=False) return leaves class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting): """Gradient Boosting for classification. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage ``n_classes_`` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function. Binary classification is a special case where only a single regression tree is induced. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'deviance', 'exponential'}, default='deviance' loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm. learning_rate : float, default=0.1 learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int, default=100 The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. subsample : float, default=1.0 The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. criterion : {'friedman_mse', 'mse', 'mae'}, default='friedman_mse' The function to measure the quality of a split. Supported criteria are 'friedman_mse' for the mean squared error with improvement score by Friedman, 'mse' for mean squared error, and 'mae' for the mean absolute error. The default value of 'friedman_mse' is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int or float, default=2 The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a fraction and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for fractions. min_samples_leaf : int or float, default=1 The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least ``min_samples_leaf`` training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression. - If int, then consider `min_samples_leaf` as the minimum number. 
- If float, then `min_samples_leaf` is a fraction and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for fractions. min_weight_fraction_leaf : float, default=0.0 The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_depth : int, default=3 maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. min_impurity_decrease : float, default=0.0 A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 min_impurity_split : float, default=None Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. init : estimator or 'zero', default=None An estimator object that is used to compute the initial predictions. ``init`` has to provide :meth:`fit` and :meth:`predict_proba`. If 'zero', the initial raw predictions are set to zero. By default, a ``DummyEstimator`` predicting the classes priors is used. random_state : int or RandomState, default=None Controls the random seed given to each Tree estimator at each boosting iteration. In addition, it controls the random permutation of the features at each split (see Notes for more details). It also controls the random spliting of the training data to obtain a validation set if `n_iter_no_change` is not None. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a fraction and `int(max_features * n_features)` features are considered at each split. - If 'auto', then `max_features=sqrt(n_features)`. - If 'sqrt', then `max_features=sqrt(n_features)`. - If 'log2', then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. verbose : int, default=0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. 
max_leaf_nodes : int, default=None Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. warm_start : bool, default=False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if ``n_iter_no_change`` is set to an integer. .. versionadded:: 0.20 n_iter_no_change : int, default=None ``n_iter_no_change`` is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside ``validation_fraction`` size of the training data as validation and terminate training when validation score is not improving in all of the previous ``n_iter_no_change`` numbers of iterations. The split is stratified. .. versionadded:: 0.20 tol : float, default=1e-4 Tolerance for the early stopping. When the loss is not improving by at least tol for ``n_iter_no_change`` iterations (if set to a number), the training stops. .. versionadded:: 0.20 ccp_alpha : non-negative float, default=0.0 Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ``ccp_alpha`` will be chosen. By default, no pruning is performed. See :ref:`minimal_cost_complexity_pruning` for details. .. versionadded:: 0.22 Attributes ---------- n_estimators_ : int The number of estimators as selected by early stopping (if ``n_iter_no_change`` is specified). Otherwise it is set to ``n_estimators``. .. versionadded:: 0.20 feature_importances_ : ndarray of shape (n_features,) The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func:`sklearn.inspection.permutation_importance` as an alternative. oob_improvement_ : ndarray of shape (n_estimators,) The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. Only available if ``subsample < 1.0`` train_score_ : ndarray of shape (n_estimators,) The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. init_ : estimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor of \ shape (n_estimators, ``loss_.K``) The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary classification, otherwise n_classes. classes_ : ndarray of shape (n_classes,) The classes labels. n_features_ : int The number of data features. n_classes_ : int The number of classes. max_features_ : int The inferred value of max_features. Notes ----- The features are always randomly permuted at each split. 
Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.ensemble import GradientBoostingClassifier >>> from sklearn.model_selection import train_test_split >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = GradientBoostingClassifier(random_state=0) >>> clf.fit(X_train, y_train) GradientBoostingClassifier(random_state=0) >>> clf.predict(X_test[:2]) array([1, 0]) >>> clf.score(X_test, y_test) 0.88 See also -------- sklearn.ensemble.HistGradientBoostingClassifier, sklearn.tree.DecisionTreeClassifier, RandomForestClassifier AdaBoostClassifier References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('deviance', 'exponential') @_deprecate_positional_args def __init__(self, *, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_decrease=0., min_impurity_split=None, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=1e-4, ccp_alpha=0.0): super().__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, random_state=random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, warm_start=warm_start, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, tol=tol, ccp_alpha=ccp_alpha) def _validate_y(self, y, sample_weight): check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) n_trim_classes = np.count_nonzero(np.bincount(y, sample_weight)) if n_trim_classes < 2: raise ValueError("y contains %d class after sample_weight " "trimmed classes with zero weights, while a " "minimum of 2 classes are required." % n_trim_classes) self.n_classes_ = len(self.classes_) return y def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : ndarray of shape (n_samples, n_classes) or (n_samples,) The decision function of the input samples, which corresponds to the raw values predicted from the trees of the ensemble . The order of the classes corresponds to that in the attribute :term:`classes_`. Regression and binary classification produce an array of shape [n_samples]. 
""" X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') raw_predictions = self._raw_predict(X) if raw_predictions.shape[1] == 1: return raw_predictions.ravel() return raw_predictions def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- score : generator of ndarray of shape (n_samples, k) The decision function of the input samples, which corresponds to the raw values predicted from the trees of the ensemble . The classes corresponds to that in the attribute :term:`classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. """ yield from self._staged_raw_predict(X) def predict(self, X): """Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values. """ raw_predictions = self.decision_function(X) encoded_labels = \ self.loss_._raw_prediction_to_decision(raw_predictions) return self.classes_.take(encoded_labels, axis=0) def staged_predict(self, X): """Predict class at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of ndarray of shape (n_samples,) The predicted value of the input samples. """ for raw_predictions in self._staged_raw_predict(X): encoded_labels = \ self.loss_._raw_prediction_to_decision(raw_predictions) yield self.classes_.take(encoded_labels, axis=0) def predict_proba(self, X): """Predict class probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ raw_predictions = self.decision_function(X) try: return self.loss_._raw_prediction_to_proba(raw_predictions) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) def predict_log_proba(self, X): """Predict class log-probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. 
""" proba = self.predict_proba(X) return np.log(proba) def staged_predict_proba(self, X): """Predict class probabilities at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of ndarray of shape (n_samples,) The predicted value of the input samples. """ try: for raw_predictions in self._staged_raw_predict(X): yield self.loss_._raw_prediction_to_proba(raw_predictions) except NotFittedError: raise except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting): """Gradient Boosting for regression. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage a regression tree is fit on the negative gradient of the given loss function. Read more in the :ref:`User Guide <gradient_boosting>`. Parameters ---------- loss : {'ls', 'lad', 'huber', 'quantile'}, default='ls' loss function to be optimized. 'ls' refers to least squares regression. 'lad' (least absolute deviation) is a highly robust loss function solely based on order information of the input variables. 'huber' is a combination of the two. 'quantile' allows quantile regression (use `alpha` to specify the quantile). learning_rate : float, default=0.1 learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int, default=100 The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. subsample : float, default=1.0 The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. criterion : {'friedman_mse', 'mse', 'mae'}, default='friedman_mse' The function to measure the quality of a split. Supported criteria are "friedman_mse" for the mean squared error with improvement score by Friedman, "mse" for mean squared error, and "mae" for the mean absolute error. The default value of "friedman_mse" is generally the best as it can provide a better approximation in some cases. .. versionadded:: 0.18 min_samples_split : int or float, default=2 The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a fraction and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. .. versionchanged:: 0.18 Added float values for fractions. min_samples_leaf : int or float, default=1 The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least ``min_samples_leaf`` training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression. - If int, then consider `min_samples_leaf` as the minimum number. 
- If float, then `min_samples_leaf` is a fraction and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. .. versionchanged:: 0.18 Added float values for fractions. min_weight_fraction_leaf : float, default=0.0 The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. max_depth : int, default=3 maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. min_impurity_decrease : float, default=0.0 A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. .. versionadded:: 0.19 min_impurity_split : float, default=None Threshold for early stopping in tree growth. A node will split if its impurity is above the threshold, otherwise it is a leaf. .. deprecated:: 0.19 ``min_impurity_split`` has been deprecated in favor of ``min_impurity_decrease`` in 0.19. The default value of ``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it will be removed in 0.25. Use ``min_impurity_decrease`` instead. init : estimator or 'zero', default=None An estimator object that is used to compute the initial predictions. ``init`` has to provide :term:`fit` and :term:`predict`. If 'zero', the initial raw predictions are set to zero. By default a ``DummyEstimator`` is used, predicting either the average target value (for loss='ls'), or a quantile for the other losses. random_state : int or RandomState, default=None Controls the random seed given to each Tree estimator at each boosting iteration. In addition, it controls the random permutation of the features at each split (see Notes for more details). It also controls the random spliting of the training data to obtain a validation set if `n_iter_no_change` is not None. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. max_features : {'auto', 'sqrt', 'log2'}, int or float, default=None The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a fraction and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. alpha : float, default=0.9 The alpha-quantile of the huber loss function and the quantile loss function. Only if ``loss='huber'`` or ``loss='quantile'``. verbose : int, default=0 Enable verbose output. 
If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. max_leaf_nodes : int, default=None Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. warm_start : bool, default=False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. See :term:`the Glossary <warm_start>`. validation_fraction : float, default=0.1 The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if ``n_iter_no_change`` is set to an integer. .. versionadded:: 0.20 n_iter_no_change : int, default=None ``n_iter_no_change`` is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside ``validation_fraction`` size of the training data as validation and terminate training when validation score is not improving in all of the previous ``n_iter_no_change`` numbers of iterations. .. versionadded:: 0.20 tol : float, default=1e-4 Tolerance for the early stopping. When the loss is not improving by at least tol for ``n_iter_no_change`` iterations (if set to a number), the training stops. .. versionadded:: 0.20 ccp_alpha : non-negative float, default=0.0 Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ``ccp_alpha`` will be chosen. By default, no pruning is performed. See :ref:`minimal_cost_complexity_pruning` for details. .. versionadded:: 0.22 Attributes ---------- feature_importances_ : ndarray of shape (n_features,) The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance. Warning: impurity-based feature importances can be misleading for high cardinality features (many unique values). See :func:`sklearn.inspection.permutation_importance` as an alternative. oob_improvement_ : ndarray of shape (n_estimators,) The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. Only available if ``subsample < 1.0`` train_score_ : ndarray of shape (n_estimators,) The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. init_ : estimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : ndarray of DecisionTreeRegressor of shape (n_estimators, 1) The collection of fitted sub-estimators. n_classes_ : int The number of classes, set to 1 in regression tasks. n_estimators_ : int The number of estimators as selected by early stopping (if ``n_iter_no_change`` is specified). Otherwise it is set to ``n_estimators``. n_features_ : int The number of data features. max_features_ : int The inferred value of max_features. 
Notes ----- The features are always randomly permuted at each split. Therefore, the best found split may vary, even with the same training data and ``max_features=n_features``, if the improvement of the criterion is identical for several splits enumerated during the search of the best split. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed. Examples -------- >>> from sklearn.datasets import make_regression >>> from sklearn.ensemble import GradientBoostingRegressor >>> from sklearn.model_selection import train_test_split >>> X, y = make_regression(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> reg = GradientBoostingRegressor(random_state=0) >>> reg.fit(X_train, y_train) GradientBoostingRegressor(random_state=0) >>> reg.predict(X_test[1:2]) array([-61...]) >>> reg.score(X_test, y_test) 0.4... See also -------- sklearn.ensemble.HistGradientBoostingRegressor, sklearn.tree.DecisionTreeRegressor, RandomForestRegressor References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile') @_deprecate_positional_args def __init__(self, *, loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, min_impurity_decrease=0., min_impurity_split=None, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=1e-4, ccp_alpha=0.0): super().__init__( loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_depth=max_depth, init=init, subsample=subsample, max_features=max_features, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, random_state=random_state, alpha=alpha, verbose=verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start, validation_fraction=validation_fraction, n_iter_no_change=n_iter_no_change, tol=tol, ccp_alpha=ccp_alpha) def predict(self, X): """Predict regression target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : ndarray of shape (n_samples,) The predicted values. """ X = check_array(X, dtype=DTYPE, order="C", accept_sparse='csr') # In regression we can directly return the raw value from the trees. return self._raw_predict(X).ravel() def staged_predict(self, X): """Predict regression target at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- y : generator of ndarray of shape (n_samples,) The predicted value of the input samples. 
""" for raw_predictions in self._staged_raw_predict(X): yield raw_predictions.ravel() def apply(self, X): """Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``dtype=np.float32``. If a sparse matrix is provided, it will be converted to a sparse ``csr_matrix``. Returns ------- X_leaves : array-like of shape (n_samples, n_estimators) For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. """ leaves = super().apply(X) leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0]) return leaves
bsd-3-clause
secimTools/SECIMTools
src/scripts/secimtools/anovaModules/getModelResultsByGroup.py
2
2414
# Import built-in modules
import re
# Import Add-on modules
import pandas as pd
import numpy as np
# Import ANOVA Modules
from secimtools.anovaModules.gimmeTheMissin import gimmeTheMissin


def getModelResultsByGroup(model, levels, numerical):
    """
    This function generates the results by group for an ANOVA object
    (model). The results that it generates include:
        - Coefficients
        - Std. error
        - T values
        - Probability for the t-values

    :Arguments:
        :type model: statsmodel.ols
        :param model: ANOVA model

        :type levels: list
        :param levels: groups inside a factor.

        :type numerical: list
        :param numerical: Numerical factor(s) if any.

    :Returns:
        :rtype df: pandas.DataFrame
        :return df: DataFrame with the group-wise differences, standard
            errors, t-values and p-values extracted from the model.
    """
    # Extracting the parameters we are interested in from ANOVA
    # These values are going to be used multiple times
    coef = -(model.params)
    stde = model.bse
    t = -(model.tvalues)
    pt = model.pvalues
    log = -np.log10(model.pvalues)

    # Add names to the previous series
    t.name = "t-Value_for_Diff"
    stde.name = "stdError_for_Diff"
    coef.name = "diff_of"
    pt.name = "prob_greater_than_t_for_diff"
    log.name = "-log10_p-value_"

    # Concat all series into one dataframe
    df = pd.concat([coef, stde, t, pt, log], axis=1)

    # Removing intercepts
    df.drop("Intercept", inplace=True, axis="index")

    # Removing numerical factors
    for numeric in numerical:
        if numeric in df.index.tolist():
            df.drop(numeric, inplace=True, axis="index")

    # New index names: strip the patsy-style "factor[T.level]" wrapping
    newIndexNames = {origIndx: re.sub(r".+\[T\.|\]", "", origIndx)
                     for origIndx in df.index.tolist()}

    # Rename df indexes with the new index names
    df.rename(newIndexNames, inplace=True)

    # Getting the baseline (reference) level for each contrast
    baseLines = gimmeTheMissin(df.index.tolist(), levels)

    # Creating pretty names for indexes
    oldIndex = dict()
    for origIndx, base in zip(df.index.tolist(), baseLines):
        if base == origIndx:
            df.drop(origIndx, inplace=True)
        else:
            oldIndex[origIndx] = "{0}-{1}".format(base, origIndx)

    # Replace zero differences (including negative zero) with missing values
    df.replace(-0, np.nan, inplace=True)
    df.replace(0, np.nan, inplace=True)

    # Rename indexes
    df.rename(index=oldIndex, inplace=True)

    # Returns
    return df
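A short usage sketch (not part of the module above) of how getModelResultsByGroup might be driven from a statsmodels formula fit; the column names, factor levels and random data are assumptions for illustration:

# Illustrative sketch: fit a one-factor ANOVA-style OLS model and extract the
# per-group differences with the function defined above (assumed to be in scope).
import numpy as np
import pandas as pd
from statsmodels.formula.api import ols

data = pd.DataFrame({
    "group": ["A"] * 5 + ["B"] * 5 + ["C"] * 5,
    "value": np.concatenate([np.random.normal(loc, 1.0, 5) for loc in (0, 1, 2)]),
})
model = ols("value ~ C(group)", data=data).fit()
# levels lists the groups of the factor; numerical is empty because this model
# has no numerical covariates.
results = getModelResultsByGroup(model, levels=["A", "B", "C"], numerical=[])
print(results)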
mit
nickpowersys/CaaR
caar/history.py
1
12592
from __future__ import absolute_import, division, print_function import pickle import random from collections import namedtuple import pandas as pd from caar.cleanthermostat import _sort_meta_in_col_order, dict_from_file from future import standard_library standard_library.install_aliases() Cycle = namedtuple('Cycle', ['device_id', 'cycle_mode', 'start_time']) Sensor = namedtuple('Sensor', ['sensor_id', 'timestamp']) Geospatial = namedtuple('Geospatial', ['location_id', 'timestamp']) def create_sensors_df(dict_or_pickle_file, sensor_ids=None): """Returns pandas DataFrame containing sensor ID, timestamps and sensor observations. Args: dict_or_pickle_file (dict or str): The object must have been created with dict_from_file() or pickle_from_file() function. sensor_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: sensors_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. """ fields = list(Sensor._fields) multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file, fields, ids=sensor_ids) id_labels = [meta[col]['heading'] for col in ['id', 'time']] data_labels = _data_labels_from_meta(meta, id_labels) sensors_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return sensors_df def sensors_df_from_text(raw_file, states=None, sensors_file=None, postal_file=None, auto='sensors', id_col_heading=None, encoding='UTF-8', delimiter=None, quote=None, cols_to_ignore=None, meta=False, sensor_ids=None): sensors = dict_from_file(raw_file, states=states, sensors_file=sensors_file, postal_file=postal_file, auto=auto, id_col_heading=id_col_heading, encoding=encoding, delimiter=delimiter, quote=quote, cols_to_ignore=cols_to_ignore, meta=meta) return create_sensors_df(sensors, sensor_ids=sensor_ids) def sensors_df_from_bin(pickle_file, sensor_ids=None): """Returns pandas DataFrame containing sensor ID, timestamps and sensor observations. Args: pickle_file (str): The pickle file must have been created with pickle_from_file() function. sensor_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: sensors_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. """ fields = list(Sensor._fields) multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file, fields, ids=sensor_ids) id_labels = [meta[col]['heading'] for col in ['id', 'time']] data_labels = _data_labels_from_meta(meta, id_labels) sensors_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return sensors_df def create_cycles_df(dict_or_pickle_file, device_ids=None): """Returns pandas DataFrame containing sensor ids and cycle beginning timestamps as multi-part indexes, and cycle ending times as values. Args: dict_or_pickle_file (dict or str): Must have been created with dict_from_file() or pickle_from_file() function. device_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: cycles_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. 
""" multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file, list(Cycle._fields), ids=device_ids) id_labels = [meta[col]['heading'] for col in ['id', 'cycle', 'start_time']] data_labels = _data_labels_from_meta(meta, id_labels) cycles_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return cycles_df def cycles_df_from_text(raw_file, cycle=None, states=None, postal_file=None, auto='cycles', id_col_heading=None, cycle_col_heading=None, encoding='UTF-8', delimiter=None, quote=None, cols_to_ignore=None, meta=False, device_ids=None): cycles = dict_from_file(raw_file, cycle=cycle, states=states, postal_file=postal_file, auto=auto, id_col_heading=id_col_heading, cycle_col_heading=cycle_col_heading, encoding=encoding, delimiter=delimiter, quote=quote, cols_to_ignore=cols_to_ignore, meta=meta) return create_cycles_df(cycles, device_ids=device_ids) def cycles_df_from_bin(pickle_file, device_ids=None): """Returns pandas DataFrame containing sensor ids and cycle beginning timestamps as multi-part indexes, and cycle ending times as values. Args: pickle_file (dict or str): Must have been created with dict_from_file() or pickle_from_file() function. device_ids (Optional[list or other iterable of ints or strings]): Sensor IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: cycles_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. """ multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file, list(Cycle._fields), ids=device_ids) id_labels = [meta[col]['heading'] for col in ['id', 'cycle', 'start_time']] data_labels = _data_labels_from_meta(meta, id_labels) cycles_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return cycles_df def create_geospatial_df(dict_or_pickle_file, location_ids=None): """Returns pandas DataFrame containing records with location IDs and time stamps as multi-part indexes and outdoor temperatures as values. Args: dict_or_pickle_file (dict or str): Must have been created with dict_from_file() or pickle_from_file() function. location_ids (Optional[list or other iterable of ints or strings]): Location IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: geospatial_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. """ fields = list(Geospatial._fields) multi_ids, vals, meta = _records_as_lists_of_tuples(dict_or_pickle_file, fields, ids=location_ids) id_labels = [meta[col]['heading'] for col in ['id', 'time']] data_labels = _data_labels_from_meta(meta, id_labels) geospatial_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return geospatial_df def geospatial_df_from_text(raw_file, states=None, sensors_file=None, postal_file=None, auto='geospatial', id_col_heading=None, encoding='UTF-8', delimiter=None, quote=None, cols_to_ignore=None, meta=False, location_ids=None): geos = dict_from_file(raw_file, states=states, sensors_file=sensors_file, postal_file=postal_file, auto=auto, id_col_heading=id_col_heading, encoding=encoding, delimiter=delimiter, quote=quote, cols_to_ignore=cols_to_ignore, meta=meta) return create_geospatial_df(geos, locations_ids=location_ids) def geospatial_df_from_bin(pickle_file, location_ids=None): """Returns pandas DataFrame containing records with location IDs and time stamps as multi-part indexes and outdoor temperatures as values. Args: pickle_file (str): Must have been created with pickle_from_file() function. 
location_ids (Optional[list or other iterable of ints or strings]): Location IDs. If no argument is specified, all IDs from the first arg will be in the DataFrame. Returns: geospatial_df (pandas DataFrame): DataFrame has MultiIndex based on the ID(s) and timestamps. """ fields = list(Geospatial._fields) multi_ids, vals, meta = _records_as_lists_of_tuples(pickle_file, fields, ids=location_ids) id_labels = [meta[col]['heading'] for col in ['id', 'time']] data_labels = _data_labels_from_meta(meta, id_labels) geospatial_df = _create_multi_index_df(id_labels, multi_ids, data_labels, vals) return geospatial_df def _records_as_lists_of_tuples(dict_or_pickle_file, fields, ids=None): """Returns tuple containing 1) a list of named tuples containing sensor (or outdoor location) ids and timestamps and 2) a list of either indoor (or outdoor) temperatures, or the ending time of a cycle, based on input of a pickle file containing a dict. """ records = {} if isinstance(dict_or_pickle_file, dict): records = dict_or_pickle_file['records'] meta = dict_or_pickle_file['cols_meta'] else: try: with open(dict_or_pickle_file, 'rb') as cp: container = pickle.load(cp) records = container['records'] meta = container['cols_meta'] except ValueError: print('The first argument must be a pickle file or dict.') if ids is not None: for record_key in list(records.keys()): # Discard record if it is not among the desired ids. if getattr(record_key, fields[0]) not in ids: records.pop(record_key, None) multi_ids, vals = _multi_ids_and_data_vals(records, fields) return multi_ids, vals, meta def _data_labels_from_meta(meta, id_labels): sorted_meta = _sort_meta_in_col_order(meta) data_labels = [meta[col]['heading'] for col in list(sorted_meta)[len(id_labels):]] return data_labels def random_record(dict_or_pickle_file, value_only=False): """Returns a randomly chosen key-value pair from a dict or pickle file.""" records = {} if isinstance(dict_or_pickle_file, dict): records = dict_or_pickle_file['records'] else: try: with open(dict_or_pickle_file, 'rb') as cp: container = pickle.load(cp) records = container['records'] except ValueError: print('The first argument must be a pickle file or dict.') copied_keys = list(records.keys()) random_record_key = _random_record_key(copied_keys) if value_only: return records[random_record_key] else: return random_record_key, records[random_record_key] def _random_record_key(keys): try: random_record_key = random.choice(keys) except IndexError: print('No records in the dict or pickle file.') else: return random_record_key def _multi_ids_and_data_vals(records, fields): """Returns tuple containing 1) a list of named tuples containing ids and timestamps (and cycle modes if applicable) and 2) a list of either temperatures or cycle ending times, based on items (records) in a dict. """ multi_ids = [] vals = [] for k, v in records.items(): ids = tuple(getattr(k, f) for f in fields) multi_ids.append(ids) vals.append(v) return multi_ids, vals def _create_multi_index_df(multiindex_names, multi_ids, column_names, values): """Returns MultiIndex pandas dataframe in which the index columns are for an id and timestamp and the value is for a temperature or a timestamp indicating the end of a cycle. """ multiindex_columns = tuple(multiindex_names) multicols = pd.MultiIndex.from_tuples(multi_ids, names=multiindex_columns) df = pd.DataFrame(values, index=multicols, columns=column_names) df.sort_index(inplace=True, sort_remaining=True) return df
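A minimal standalone sketch (invented sensor data, not part of the module) of the id/timestamp MultiIndex layout that _create_multi_index_df assembles:

# Illustrative sketch: build the same kind of MultiIndex DataFrame with plain pandas.
import pandas as pd

multi_ids = [(1, "2016-01-01 00:00"), (1, "2016-01-01 01:00"),
             (2, "2016-01-01 00:00")]
values = [70.5, 71.0, 68.2]
index = pd.MultiIndex.from_tuples(multi_ids, names=("sensor_id", "timestamp"))
df = pd.DataFrame(values, index=index, columns=["temperature"])
df.sort_index(inplace=True, sort_remaining=True)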
bsd-3-clause
fishcorn/pylearn2
pylearn2/models/svm.py
6
3259
"""Wrappers for SVM models.""" __authors__ = "Ian Goodfellow" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["Ian Goodfellow"] __license__ = "3-clause BSD" __maintainer__ = "LISA Lab" __email__ = "pylearn-dev@googlegroups" import numpy as np import warnings try: from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC except ImportError: warnings.warn("Could not import sklearn.") class OneVsRestClassifier(object): """ See `sklearn.multiclass.OneVsRestClassifier`. Notes ----- This class is a dummy class included so that sphinx can import DenseMulticlassSVM and document it even when sklearn is not installed. """ def __init__(self, estimator): raise RuntimeError("sklearn not available.") class DenseMulticlassSVM(OneVsRestClassifier): """ sklearn does very different things behind the scenes depending upon the exact identity of the class you use. The only way to get an SVM implementation that works with dense data is to use the `SVC` class, which implements one-against-one classification. This wrapper uses it to implement one-against- rest classification, which generally works better in my experiments. To avoid duplicating the training data, use only numpy ndarrays whose tags.c_contigous flag is true, and which are in float64 format. Parameters ---------- C : float SVM regularization parameter. See SVC.__init__ for details. kernel : str Type of kernel to use. See SVC.__init__ for details. gamma : float Optional parameter of kernel. See SVC.__init__ for details. coef0 : float Optional parameter of kernel. See SVC.__init__ for details. degree : int Degree of kernel, if kernel is polynomial. See SVC.__init__ for details. """ def __init__(self, C, kernel='rbf', gamma=1.0, coef0=1.0, degree=3): estimator = SVC(C=C, kernel=kernel, gamma=gamma, coef0=coef0, degree=degree) super(DenseMulticlassSVM, self).__init__(estimator) def fit(self, X, y): """ Fit underlying estimators. Parameters ---------- X : array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] or [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. Returns ------- self """ super(DenseMulticlassSVM, self).fit(X, y) return self def decision_function(self, X): """ Returns the distance of each sample from the decision boundary for each class. Parameters ---------- X : array-like, shape = [n_samples, n_features] A 2D ndarray with each row containing the input features for one example. Returns ------- T : array-like, shape = [n_samples, n_classes] """ return np.column_stack([estimator.decision_function(X) for estimator in self.estimators_])
bsd-3-clause
Rbeaty88/ginga
scripts/example5_mpl.py
1
5695
#! /usr/bin/env python # # example5_mpl.py -- Load a fits file into a Ginga widget with a # matplotlib backend. # # Eric Jeschke ([email protected]) # # Copyright (c) Eric R. Jeschke. All rights reserved. # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. # # """ $ ./example5_mpl.py [fits file] This example program shows to capture button and keypress events for your own use. After loading a FITS file use the following keys: Press 'x' to turn on capture of events and bypass most normal keystroke processing. Press it again to resume normal processing. An on-screen message will tell you which mode you are in. While in 'capture mode' you can draw points with the right mouse button. Press 'c' to clear the canvas of drawn points. """ import sys, os # just in case you want to use qt os.environ['QT_API'] = 'pyqt' import matplotlib options = ['Qt4Agg', 'GTK', 'GTKAgg', 'MacOSX', 'GTKCairo', 'WXAgg', 'TkAgg', 'QtAgg', 'FltkAgg', 'WX'] # Force a specific toolkit, if you leave commented matplotlib will choose # an appropriate one for your system #matplotlib.use('Qt4Agg') import matplotlib.pyplot as plt import matplotlib.patches as patches import numpy as np from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas from ginga.mplw.ImageViewCanvasTypesMpl import DrawingCanvas from ginga.AstroImage import AstroImage from ginga.misc import log from ginga import cmap # Set to True to get diagnostic logging output use_logger = False class MyGingaFigure(object): def __init__(self, logger, fig): self.logger = logger # create a ginga object and tell it about the figure fi = ImageViewCanvas(logger) fi.enable_autocuts('on') fi.set_autocut_params('zscale') fi.add_callback('key-press', self.key_press_ginga) fi.set_figure(fig) self.fitsimage = fi # enable all interactive features fi.get_bindings().enable_all(True) canvas = DrawingCanvas() canvas.enable_draw(True) canvas.set_callback('button-press', self.btn_down) #canvas.set_callback('motion', self.drag) canvas.set_callback('button-release', self.btn_up) canvas.set_drawtype('point', color='cyan') canvas.set_callback('draw-event', self.draw_event) canvas.add_callback('key-press', self.key_press) canvas.setSurface(self.fitsimage) canvas.ui_setActive(True) self.canvas = canvas def load(self, fitspath): # load an image image = AstroImage(self.logger) image.load_file(fitspath) self.fitsimage.set_image(image) def capture(self): """ Insert our canvas so that we intercept all events before they reach processing by the bindings layer of Ginga. """ # insert the canvas self.fitsimage.add(self.canvas, tag='mycanvas') def release(self): """ Remove our canvas so that we no longer intercept events. """ # retract the canvas self.fitsimage.deleteObjectByTag('mycanvas') def clear(self): """ Clear the canvas of any drawing made on it. """ self.canvas.deleteAllObjects() def get_wcs(self, data_x, data_y): """Return (re_deg, dec_deg) for the (data_x, data_y) position based on any WCS associated with the loaded image. 
""" img = self.fitsimage.get_image() ra, dec = img.pixtoradec(data_x, data_y) return ra, dec # CALLBACKS # NOTE: return values on callbacks are important: if True then lower # layer Ginga canvas items will not get events def key_press(self, canvas, keyname): if keyname == 'x': self.fitsimage.onscreen_message("Moving to regular mode", delay=1.0) self.release() elif keyname == 'c': self.clear() return True fi = canvas.fitsimage data_x, data_y = fi.get_last_data_xy() ra, dec = self.get_wcs(data_x, data_y) print "key %s pressed at data %d,%d ra=%s dec=%s" % ( keyname, data_x, data_y, ra, dec) return True def key_press_ginga(self, fitsimage, keyname): if keyname == 'x': self.fitsimage.onscreen_message("Moving to capture mode", delay=1.0) self.capture() return True def btn_down(self, canvas, button, data_x, data_y): ra, dec = self.get_wcs(data_x, data_y) print "button %s pressed at data %d,%d ra=%s dec=%s" % ( button, data_x, data_y, ra, dec) return False def btn_up(self, canvas, button, data_x, data_y): ra, dec = self.get_wcs(data_x, data_y) print "button %s released at data %d,%d ra=%s dec=%s" % ( button, data_x, data_y, ra, dec) return False def draw_event(self, canvas, tag): obj = canvas.getObjectByTag(tag) data_x, data_y = obj.x, obj.y ra, dec = self.get_wcs(data_x, data_y) print "A %s was drawn at data %d,%d ra=%s dec=%s" % ( obj.kind, data_x, data_y, ra, dec) return True # create a regular matplotlib figure fig = plt.figure() # Here is our object logger = log.get_logger(null=not use_logger, log_stderr=True) foo = MyGingaFigure(logger, fig) # load an image, if one was provided if len(sys.argv) > 1: foo.load(sys.argv[1]) # Press 'x' to turn on capture of events. Press it again to resume normal # processing of events. # Press 'c' to clear the canvas of drawn points. plt.show()
bsd-3-clause
madjelan/scikit-learn
sklearn/svm/tests/test_svm.py
116
31653
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from scipy import sparse from nose.tools import assert_raises, assert_true, assert_equal, assert_false from sklearn.base import ChangedBehaviorWarning from sklearn import svm, linear_model, datasets, metrics, base from sklearn.cross_validation import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils import ConvergenceWarning from sklearn.utils.validation import NotFittedError from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. # shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deteriministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_single_sample_1d(): # Test whether SVCs work on a single sample given as a 1-d array clf = svm.SVC().fit(X, Y) clf.predict(X[0]) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf.predict(X[0]) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1 assert np.abs(score1 - score2) < 0.1 def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM() clf.fit(X) pred = clf.predict(T) assert_array_almost_equal(pred, [-1, -1, -1]) assert_array_almost_equal(clf.intercept_, [-1.008], 
decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]], decimal=3) assert_raises(ValueError, lambda: clf.coef_) def test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(probability=True, random_state=0, C=1.0), svm.NuSVC(probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) # check deprecation warning clf.decision_function_shape = None msg = "change the shape of the decision function" dec = assert_warns_message(ChangedBehaviorWarning, msg, clf.decision_function, X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_decision_function(): # Test SVR's decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel()) def test_weight(): # Test class weights clf = svm.SVC(class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 
assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: check on NuSVR, OneClass, etc. clf = svm.SVC() clf.fit(X, Y) assert_array_equal(clf.predict(X[2]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict(X[2]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC() clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='weighted') <= metrics.f1_score(y, y_pred_balanced, average='weighted')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. 
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC() assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC().fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC() clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1/L1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2/L2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("L2", "squared_hinge", "loss='L2'", "1.0"), svm.LinearSVC(loss="L2").fit, X, y) # LinearSVR # loss l1/L1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("L1", "epsilon_insensitive", "loss='L1'", "1.0"), svm.LinearSVR(loss="L1").fit, X, y) # loss l2/L2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) # FIXME remove in 0.18 def test_linear_svx_uppercase_loss_penalty(): # Check if Upper case notation is supported by _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the uppercase notation will be removed in %s") # loss SQUARED_hinge --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("SQUARED_hinge", "squared_hinge", "0.18"), svm.LinearSVC(loss="SQUARED_hinge").fit, X, y) # penalty L2 --> l2 assert_warns_message(DeprecationWarning, msg.replace("loss", "penalty") % ("L2", "l2", "0.18"), svm.LinearSVC(penalty="L2").fit, X, y) # loss EPSILON_INSENSITIVE --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive", "0.18"), svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). 
target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC() assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR() assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svc_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(max_iter=2, verbose=1) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) def test_svr_coef_sign(): # Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
bsd-3-clause
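The crammer_singer test in the file above checks that the multi-class decision function is a plain affine map of the inputs and that predict() takes its argmax. A standalone sketch of that identity on the iris data (the tolerance here is illustrative, not the one used by the test suite):

import numpy as np
from sklearn import datasets, svm

iris = datasets.load_iris()

# Fit the Crammer-Singer multi-class LinearSVC, as in the test above.
clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
clf.fit(iris.data, iris.target)

# decision_function should equal X . coef_.T + intercept_ ...
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert np.allclose(dec, clf.decision_function(iris.data), atol=1e-6)

# ... and predict() should pick the argmax of that score matrix.
assert np.array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))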
kerzhner/airflow
airflow/hooks/presto_hook.py
24
3472
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from builtins import str import logging from pyhive import presto from pyhive.exc import DatabaseError from airflow.hooks.dbapi_hook import DbApiHook logging.getLogger("pyhive").setLevel(logging.INFO) class PrestoException(Exception): pass class PrestoHook(DbApiHook): """ Interact with Presto through PyHive! >>> ph = PrestoHook() >>> sql = "SELECT count(1) AS num FROM airflow.static_babynames" >>> ph.get_records(sql) [[340698]] """ conn_name_attr = 'presto_conn_id' default_conn_name = 'presto_default' def get_conn(self): """Returns a connection object""" db = self.get_connection(self.presto_conn_id) return presto.connect( host=db.host, port=db.port, username=db.login, catalog=db.extra_dejson.get('catalog', 'hive'), schema=db.schema) @staticmethod def _strip_sql(sql): return sql.strip().rstrip(';') def get_records(self, hql, parameters=None): """ Get a set of records from Presto """ try: return super(PrestoHook, self).get_records( self._strip_sql(hql), parameters) except DatabaseError as e: if (hasattr(e, 'message') and 'errorName' in e.message and 'message' in e.message): # Use the structured error data in the raised exception raise PrestoException('{name}: {message}'.format( name=e.message['errorName'], message=e.message['message'])) else: raise PrestoException(str(e)) def get_first(self, hql, parameters=None): """ Returns only the first row, regardless of how many rows the query returns. """ try: return super(PrestoHook, self).get_first( self._strip_sql(hql), parameters) except DatabaseError as e: raise PrestoException(e[0]['message']) def get_pandas_df(self, hql, parameters=None): """ Get a pandas dataframe from a sql query. """ import pandas cursor = self.get_cursor() try: cursor.execute(self._strip_sql(hql), parameters) data = cursor.fetchall() except DatabaseError as e: raise PrestoException(e[0]['message']) column_descriptions = cursor.description if data: df = pandas.DataFrame(data) df.columns = [c[0] for c in column_descriptions] else: df = pandas.DataFrame() return df def run(self, hql, parameters=None): """ Execute the statement against Presto. Can be used to create views. """ return super(PrestoHook, self).run(self._strip_sql(hql), parameters) def insert_rows(self): raise NotImplementedError()
apache-2.0
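A hedged usage sketch for the hook above: it assumes an Airflow connection named 'presto_default' pointing at a reachable Presto coordinator, and reuses the airflow.static_babynames table from the class docstring; the GROUP BY query is a placeholder, not part of the hook itself.

from airflow.hooks.presto_hook import PrestoHook

# Assumes the 'presto_default' connection is already configured in Airflow.
ph = PrestoHook(presto_conn_id='presto_default')

# Single row / single value.
first = ph.get_first("SELECT count(1) FROM airflow.static_babynames")

# Full result set as a pandas DataFrame (column names come from the cursor).
df = ph.get_pandas_df(
    "SELECT state, count(1) AS n FROM airflow.static_babynames GROUP BY state")

print(first)
print(df.head())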
aflaxman/scikit-learn
benchmarks/bench_plot_lasso_path.py
84
4005
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) # ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) # ax.legend() i += 1 plt.show()
bsd-3-clause
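A single configuration of the timing loop above, stripped of the plotting and the grid over problem sizes, looks like the sketch below; the problem dimensions are arbitrary and only the sklearn calls already used by the benchmark appear.

import numpy as np
from time import time

from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lasso_path

# One small configuration of the benchmark above (sizes are arbitrary).
X, y = make_regression(n_samples=500, n_features=500,
                       n_informative=50, effective_rank=50, bias=0.0)

# lars_path with a precomputed Gram matrix, as in the benchmark.
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
t0 = time()
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
print("lars_path (with Gram): %0.3fs" % (time() - t0))

# Coordinate-descent lasso_path without precomputation.
t0 = time()
lasso_path(X, y, precompute=False)
print("lasso_path (without Gram): %0.3fs" % (time() - t0))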
kostyfisik/fdtd-1d
fdtd-step4-Fresnel.py
1
4768
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # Copyright (C) 2015 Konstantin Ladutenko <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # 1D FDTD check Fresnel equations # Based on Understanding the Finite-Difference Time-Domain Method, John # B. Schneider, www.eecs.wsu.edu/~schneidj/ufdtd, 2010. import numpy as np import math as m import matplotlib.pyplot as plt from time import sleep imp0=377.0 # Free space impedance size=1800 # Domain size #Dielectric distribution epsilon1 = 8 epsilon2 = 2 #isOutput = False isOutput = True def fdtd_fresnel(epsilon1, epsilon2): n1 = np.sqrt(epsilon1) n2 = np.sqrt(epsilon2) eps= np.ones(size) eps[:int(size/2.0)] = epsilon1 eps[int(size/2.0):] = epsilon2 # setting ABC constants _AFTER_ epsilon (we need speed of ligth in media) # Taflove, eq. 6.35 # Left boundary c = 1/np.sqrt(eps[0]) al = (c-1)/(c+1) bl = 2/(c + 1) wl_nm1,wl_n,wl_np1 = 0,0,0 # Field at x=0 at time steps n-1, n, n+1 wlp1_nm1,wlp1_n,wlp1_np1 = 0,0,0 # Field at x=1 at time steps n-1, n, n+1 # Right boundary c = 1/np.sqrt(eps[-1]) ar = (c-1)/(c+1) br = 2/(c + 1) wr_nm1,wr_n,wr_np1 = 0,0,0 # Field at x=size at time steps n-1, n, n+1 wrm1_nm1,wrm1_n,wrm1_np1 = 0,0,0 # Field at x=size-1 at time steps n-1, n, n+1 #Source source_width = 30.0*np.sqrt(max(epsilon1,epsilon2)) delay = 10*source_width source_x = int(1.0*size/10.0) #Source position def source(current_time, delay, source_width): return m.exp(-(current_time-delay)**2/(2.0 * source_width**2)) #Monitor points Emax1_x = int(2.0*size/15.0) Emax2_x = int(14.0*size/15.0) Emax1, Emax2 = 0,0 Hmax1, Hmax2 = 0,0 #Model total_steps = int(size*3.0+delay) # Time stepping frame_interval = int(total_steps/15.0) all_steps = np.linspace(0, size-1, size) #Inital field E_z and H_y is equal to zero ez = np.zeros(size) hy = np.zeros(size) x = np.arange(0,size-1,1) #print(x) for time in xrange(total_steps): ###################### #Magnetic field ###################### hy[x] = hy[x] + (ez[x+1] - ez[x])/imp0 #Evaluate Mur ABC value (eq. 6.35 Taflove) wrm1_np1 = hy[-2] wr_np1 = -wrm1_nm1 + ar*(wrm1_np1+wr_nm1) + br*(wr_n+wrm1_n) hy[-1] = wr_np1 #Cycle field values at boundary wr_nm1, wrm1_nm1 = wr_n, wrm1_n wr_n, wrm1_n = wr_np1, wrm1_np1 ###################### #Electric field ###################### ez[x+1] = ez[x+1] + (hy[x+1]-hy[x])*imp0/eps[x+1] ez[source_x] += source(time, delay, source_width) #Evaluate Mur ABC value (eq. 
6.35 Taflove) wlp1_np1 = ez[1] wl_np1 = -wlp1_nm1 + al*(wlp1_np1+wl_nm1) + bl*(wl_n+wlp1_n) ez[0] = wl_np1 #Cycle field values at boundary wl_nm1, wlp1_nm1 = wl_n, wlp1_n wl_n, wlp1_n = wl_np1, wlp1_np1 ###################### #Monitor ###################### Emax1 = max(Emax1, np.abs(ez[Emax1_x])) Emax2 = max(Emax2, np.abs(ez[Emax2_x])) Hmax1 = max(Hmax1, np.abs(hy[Emax1_x])) Hmax2 = max(Hmax2, np.abs(hy[Emax2_x])) ###################### # Output ###################### if time % frame_interval == 0 and isOutput: plt.clf() plt.title("Ez after t=%i"%time) plt.plot(all_steps, ez, all_steps, hy*imp0) plt.show() print("n1 = %f, n2 = %f" % (n1, n2)) Fresnel_ratio = 1-np.abs((n1-n2)/(n1+n2))**2 print("Fresnel equation ratio 1-|(n1-n2)/(n1+n2)|^2 = %f" % Fresnel_ratio) FDTD_ratio = Emax2*Hmax2/(Emax1*Hmax1) print ("FDTD ratio = %f"%FDTD_ratio) error = np.abs((FDTD_ratio-Fresnel_ratio)/Fresnel_ratio) print("Error = %f%%" % (error*100.0) ) return epsilon1, epsilon2, FDTD_ratio,Fresnel_ratio, error*100 epsilon1 = 8 epsilon2 = 2 a1=fdtd_fresnel(epsilon1, epsilon2) epsilon1 = 3 epsilon2 = 5 a2=fdtd_fresnel(epsilon1, epsilon2) epsilon1 = 1 epsilon2 = 9 a3=fdtd_fresnel(epsilon1, epsilon2)
gpl-3.0
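The script validates its FDTD power ratio against the normal-incidence Fresnel result 1 - |(n1-n2)/(n1+n2)|^2. The reference values for the three (epsilon1, epsilon2) pairs exercised at the bottom of the script can be computed directly:

import numpy as np

def fresnel_transmitted_fraction(epsilon1, epsilon2):
    """Normal-incidence transmitted power fraction, 1 - |(n1-n2)/(n1+n2)|^2."""
    n1, n2 = np.sqrt(epsilon1), np.sqrt(epsilon2)
    return 1.0 - abs((n1 - n2) / (n1 + n2))**2

# The three epsilon pairs used at the end of the script above.
for eps1, eps2 in [(8, 2), (3, 5), (1, 9)]:
    print(eps1, eps2, fresnel_transmitted_fraction(eps1, eps2))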
xwolf12/scikit-learn
sklearn/tests/test_base.py
216
7045
# Author: Gael Varoquaux # License: BSD 3 clause import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.base import BaseEstimator, clone, is_classifier from sklearn.svm import SVC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import deprecated ############################################################################# # A few test classes class MyEstimator(BaseEstimator): def __init__(self, l1=0, empty=None): self.l1 = l1 self.empty = empty class K(BaseEstimator): def __init__(self, c=None, d=None): self.c = c self.d = d class T(BaseEstimator): def __init__(self, a=None, b=None): self.a = a self.b = b class DeprecatedAttributeEstimator(BaseEstimator): def __init__(self, a=None, b=None): self.a = a if b is not None: DeprecationWarning("b is deprecated and renamed 'a'") self.a = b @property @deprecated("Parameter 'b' is deprecated and renamed to 'a'") def b(self): return self._b class Buggy(BaseEstimator): " A buggy estimator that does not set its parameters right. " def __init__(self, a=None): self.a = 1 class NoEstimator(object): def __init__(self): pass def fit(self, X=None, y=None): return self def predict(self, X=None): return None class VargEstimator(BaseEstimator): """Sklearn estimators shouldn't have vargs.""" def __init__(self, *vargs): pass ############################################################################# # The tests def test_clone(): # Tests that clone creates a correct deep copy. # We create an estimator, make a copy of its original state # (which, in this case, is the current state of the estimator), # and check that the obtained copy is a correct deep copy. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) new_selector = clone(selector) assert_true(selector is not new_selector) assert_equal(selector.get_params(), new_selector.get_params()) selector = SelectFpr(f_classif, alpha=np.zeros((10, 2))) new_selector = clone(selector) assert_true(selector is not new_selector) def test_clone_2(): # Tests that clone doesn't copy everything. # We first create an estimator, give it an own attribute, and # make a copy of its original state. Then we check that the copy doesn't # have the specific attribute we manually added to the initial estimator. from sklearn.feature_selection import SelectFpr, f_classif selector = SelectFpr(f_classif, alpha=0.1) selector.own_attribute = "test" new_selector = clone(selector) assert_false(hasattr(new_selector, "own_attribute")) def test_clone_buggy(): # Check that clone raises an error on buggy estimators. 
buggy = Buggy() buggy.a = 2 assert_raises(RuntimeError, clone, buggy) no_estimator = NoEstimator() assert_raises(TypeError, clone, no_estimator) varg_est = VargEstimator() assert_raises(RuntimeError, clone, varg_est) def test_clone_empty_array(): # Regression test for cloning estimators with empty arrays clf = MyEstimator(empty=np.array([])) clf2 = clone(clf) assert_array_equal(clf.empty, clf2.empty) clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]]))) clf2 = clone(clf) assert_array_equal(clf.empty.data, clf2.empty.data) def test_clone_nan(): # Regression test for cloning estimators with default parameter as np.nan clf = MyEstimator(empty=np.nan) clf2 = clone(clf) assert_true(clf.empty is clf2.empty) def test_repr(): # Smoke test the repr of the base estimator. my_estimator = MyEstimator() repr(my_estimator) test = T(K(), K()) assert_equal( repr(test), "T(a=K(c=None, d=None), b=K(c=None, d=None))" ) some_est = T(a=["long_params"] * 1000) assert_equal(len(repr(some_est)), 415) def test_str(): # Smoke test the str of the base estimator my_estimator = MyEstimator() str(my_estimator) def test_get_params(): test = T(K(), K()) assert_true('a__d' in test.get_params(deep=True)) assert_true('a__d' not in test.get_params(deep=False)) test.set_params(a__d=2) assert_true(test.a.d == 2) assert_raises(ValueError, test.set_params, a__a=2) def test_get_params_deprecated(): # deprecated attribute should not show up as params est = DeprecatedAttributeEstimator(a=1) assert_true('a' in est.get_params()) assert_true('a' in est.get_params(deep=True)) assert_true('a' in est.get_params(deep=False)) assert_true('b' not in est.get_params()) assert_true('b' not in est.get_params(deep=True)) assert_true('b' not in est.get_params(deep=False)) def test_is_classifier(): svc = SVC() assert_true(is_classifier(svc)) assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]}))) assert_true(is_classifier(Pipeline([('svc', svc)]))) assert_true(is_classifier(Pipeline([('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))]))) def test_set_params(): # test nested estimator parameter setting clf = Pipeline([("svc", SVC())]) # non-existing parameter in svc assert_raises(ValueError, clf.set_params, svc__stupid_param=True) # non-existing parameter of pipeline assert_raises(ValueError, clf.set_params, svm__stupid_param=True) # we don't currently catch if the things in pipeline are estimators # bad_pipeline = Pipeline([("bad", NoEstimator())]) # assert_raises(AttributeError, bad_pipeline.set_params, # bad__stupid_param=True) def test_score_sample_weight(): from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn import datasets rng = np.random.RandomState(0) # test both ClassifierMixin and RegressorMixin estimators = [DecisionTreeClassifier(max_depth=2), DecisionTreeRegressor(max_depth=2)] sets = [datasets.load_iris(), datasets.load_boston()] for est, ds in zip(estimators, sets): est.fit(ds.data, ds.target) # generate random sample weights sample_weight = rng.randint(1, 10, size=len(ds.target)) # check that the score with and without sample weights are different assert_not_equal(est.score(ds.data, ds.target), est.score(ds.data, ds.target, sample_weight=sample_weight), msg="Unweighted and weighted scores " "are unexpectedly equal")
bsd-3-clause
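The clone tests above reduce to one contract: clone() rebuilds an estimator from its constructor parameters and drops anything attached after construction. A minimal illustration of that contract, using SVC instead of the feature selectors in the tests:

from sklearn.base import clone
from sklearn.svm import SVC

est = SVC(C=10.0, kernel='linear')
est.own_attribute = "not a constructor parameter"

copy = clone(est)

assert copy is not est
assert copy.get_params() == est.get_params()   # constructor params survive
assert not hasattr(copy, "own_attribute")      # ad-hoc attributes do not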
suyashbire1/pyhton_scripts_mom6
plottemp1.py
2
1990
import sys
import readParams_moreoptions as rdp1
import numpy as np  # explicit import; the original relied on the wildcard import below
from getvaratz import *
import matplotlib.pyplot as plt
from mom_plot1 import m6plot
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time

def extractT(geofil,fil,xstart,xend,ystart,yend,zs,ze,meanax,ts=0,te=None,
        z=None,drhodt=-0.2,rho0=1031.0,savfil=None,plotit=True,loop=True):
    keepax = ()
    for i in range(4):
        if i not in meanax:
            keepax += (i,)

    fh = mfdset(fil)
    (xs,xe),(ys,ye),dimh = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
            slat=ystart, nlat=yend,zs=zs,ze=ze)
    fhgeo = dset(geofil)
    D = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[0]
    fhgeo.close()
    nt = dimh[0].size
    t0 = time.time()
    zl = rdp1.getdims(fh)[2][1]

    if loop:
        print('Reading data in loop...')
        e = fh.variables['e'][0:1,zs:ze,ys:ye,xs:xe]/nt
        for i in range(nt):
            e += fh.variables['e'][i:i+1,zs:ze,ys:ye,xs:xe]/nt
            sys.stdout.write('\r'+str(int((i+1)/nt*100))+'% done...')
            sys.stdout.flush()
        print('Time taken for data reading: {}s'.format(time.time()-t0))
    else:
        e = fh.variables['e'][ts:te,zs:ze,ys:ye,xs:xe]

    X = dimh[keepax[1]]
    Y = dimh[keepax[0]]
    if 1 in keepax:
        Y = z
        if z is None:
            z = np.linspace(-np.nanmax(D),-1,num=50)
            Y = z
    T = getTatz(zl,z,e)
    T = (T - rho0)/drhodt
    T = np.ma.apply_over_axes(np.nanmean, T, meanax)
    P = T.squeeze()
    data = (X,Y,P)

    if plotit:
        Pmax = np.nanmax(P)
        Pmin = np.nanmin(P)
        im = m6plot(data,vmax=Pmax,vmin=Pmin,title=r'T at 40N ($^{\circ}$C)',
                xlabel=r'x ($^{\circ}$)',ylabel='z (m)',bvnorm=True,blevs=15)
        if savfil:
            plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
                        format='eps', transparent=False, bbox_inches='tight')
        else:
            plt.show()
    else:
        return data
gpl-3.0
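The core conversion in extractT above is the linear equation of state T = (rho - rho0)/drhodt, with defaults rho0=1031.0 and drhodt=-0.2. Applied to a small made-up density array (the values below are illustrative only):

import numpy as np

rho0, drhodt = 1031.0, -0.2                  # defaults used by extractT above
rho = np.array([1031.0, 1030.0, 1028.5])     # made-up potential densities (kg/m^3)

T = (rho - rho0) / drhodt                    # same linear conversion as in extractT
print(T)                                     # -> [ 0.   5.  12.5] degrees C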
brupoon/stellarpy
stellar.py
3
27875
# -*- coding: utf-8 -*- """ stellarPYL - python stellar spectra processing software Copyright (c) 2016 Brunston Poon @file: stellar This program comes with absolutely no warranty. """ #TODO box apprx 15x5x5 or larger for emission lamp work import numpy as np from PIL import Image from matplotlib import pyplot as plt import scipy.interpolate as spinterp import tools as to import pdb import sys import configparser import time import math import statistics config = configparser.ConfigParser() config.read('settings.ini') v = config['CONTROL']['verbose'] #enables or disables printing of debug def backMedian(img, threshold): """ calculates the median value of 'blackness' in an image under a specific threshold """ lowerx, lowery, upperx, uppery = img.getbbox() back = [] print("calculating backMedian") for x in range(lowerx, upperx): for y in range(lowery, uppery): pixel = img.getpixel((x,y)) pixelSum = pixel[0]+pixel[1]+pixel[2] if pixelSum < threshold: back.append(pixelSum) to.pbar(x/upperx) to.pbar(1) backMedian = statistics.median(back) return backMedian def intensityN(img, data, reg, threshold = 127,r=1): """ Creates a 'proper' intensity array for the data given in a numpy array and using an open Image given in img. Degree offset is calculated by a y = mx + c function as done in regression() regArray = [xvals_n, yvals_n, A, m, c] """ lowerx, lowery, upperx, uppery = img.getbbox() m, c = reg[0:2] n = -1 / m #background subtraction median calculation back = backMedian(img, threshold) if v=='yes': print("backMedian:", back) print("running intensityN") intensities = {} #this is a dictionary. step = math.sqrt((r**2) / (1 + m**2)) for xpixel in np.arange(lowerx, upperx, step): ypixel = m * xpixel + c for newx in np.arange(lowerx, upperx - 1, 0.1): newy = n * (newx - xpixel) + ypixel #point-slope, add ypixel ea.side if (newy > lowery) and (newy < uppery): #anti-aliasing implementation http://is.gd/dnj08y for newxRounded in (math.floor(newx), math.ceil(newx)): for newyRounded in (math.floor(newy), math.ceil(newy)): #we need to be sure that the rounded point is in our img if (newyRounded > lowery) and (newyRounded < uppery): pixel = img.getpixel((newxRounded,newyRounded)) if v=='yes': print("using pixel {0},{1}".format(\ newxRounded,newyRounded)) newValue = pixel[0]+pixel[1]+pixel[2] #to ensure we don't reset a value instead of adding: to.addElement(intensities, xpixel, newValue) intensities[xpixel] -= back to.pbar(xpixel/upperx) #progress bar if v=='yes': print("intensities:", intensities) return intensities def intensitySAAN(img, data, reg, threshold=127, r=1): """ intensitySAAN is the fourth iteration of the intensity function which aims to deal with the plotting of regressed non-orthogonal spectra given in an open image img, the pixel data in data, and a regArray generated using regression(). SAA - spatial antialiasing; N - new Returns a dictionary where key is x value and y value is intensity. r is the step rate along the spectral trace (default to size of one pixel) """ lowerx, lowery, upperx, uppery = img.getbbox() m, c = reg[0:2] n = -1 / m #background subtraction median calculation back = backMedian(img, threshold) if v=='yes': print("backMedian:", back) print("running intensitySAAN") intensities = {} #this is a dictionary. 
angle = np.arctan(m) step = math.sqrt((r**2) / (1 + m**2)) #for xpixel in np.linspace(lowerx, upperx,num=math.ceil((upperx/step)+1)): for xpixel in np.arange(lowerx, upperx, step): ypixel = m * xpixel + c for newx in np.arange(lowerx, upperx - 1, 0.1): newy = n * (newx - xpixel) + ypixel #point-slope, add ypixel ea.side if (newy > lowery) and (newy < uppery): #anti-aliasing implementation http://is.gd/dnj08y for newxRounded in (math.floor(newx), math.ceil(newx)): for newyRounded in (math.floor(newy), math.ceil(newy)): #we need to be sure that the rounded point is in our img if (newyRounded > lowery) and (newyRounded < uppery): percentNewX = 1 - abs(newx - newxRounded) percentNewY = 1 - abs(newy - newyRounded) percent = percentNewX * percentNewY #get antialiased intensity from pixel pixel = img.getpixel((newxRounded,newyRounded)) newValue = percent * (pixel[0]+pixel[1]+pixel[2]) if v=='yes': print("using pixel {0},{1}".format(\ newxRounded,newyRounded)) if v=='yes': print("value being added:",newValue) #to ensure we don't reset a value instead of adding: to.addElement(intensities, xpixel, newValue) intensities[xpixel] -= percent * back to.pbar(xpixel/upperx) #progress bar if v=='yes': print("intensities:", intensities) return intensities def intensitySAANB(img, data, reg, threshold=127, r=1, twidth=10,spss=0.1): """ intensitySAANB is the sixth iteration of the intensity function which aims to deal with the plotting of regressed non-orthogonal spectra given in an open image img, the pixel data in data, and a regArray generated using regression(). SAA - spatial antialiasing; NB - new, box method Returns a dictionary where key is x value and y value is intensity. r is the step rate along the spectral trace (default to size of one pixel) twidth is the width of the trace on each side of the line y = mx + c so the total will be double spss is the subpixel sampling size """ lowerx, lowery, upperx, uppery = img.getbbox() m, c = reg[0:2] n = -1 / m back = backMedian(img, threshold) if v=='yes': print("backMedian:", back) print("running intensitySAANB") intensities = {} #this is a dictionary. 
angle = np.arctan(m) step = r * np.cos(angle) for x in np.arange(lowerx, upperx, step): y = m * x + c upperLimitX, lowerLimitX = x + (step/2), x - (step/2) for y2 in np.arange(lowery,uppery,1): #a refers to a point (a,b) on the perp line to mx+c passing thru #both x and y2 a = ((y2 - y) / n) + x ulima, llima = math.floor(a + (step/2)), math.ceil(a - (step/2)) ulimaNR, llimaNR = a + (step/2), a - (step/2) #NR = No Rounding for x2 in np.arange(lowerx,upperx,1): if (x2 < ulima) and (x2 > llima): pixel = img.getpixel((x2,y2)) to.addElement(intensities, x2, pixel) if (x2 == ulima) or (x2 == llima): pixel = img.getpixel((x2,y2)) subpixelCounter = 0 totalPossibleSubpixels = (1/spss)**2 if x2 == ulima: for subpixely in np.arange(x2, x2+1,spss): for subpixelx in np.arange(x2,x2+1,spss): if subpixelx <= ulimaNR: subpixelCounter += 1 if x2 == llima: for subpixely in np.arange(x2, x2+1,spss): for subpixelx in np.arange(x2,x2+1,spss): if subpixelx >= llimaNR: subpixelCounter += 1 percentage = subpixelCounter / totalPossibleSubpixels newValue = pixel * percentage to.addElement(intensities, x2, newValue) to.pbar(xpixel/upperx) #progress bar if v=='yes': print("intensities:", intensities) return intensities def parallel(m, b, d): return b - (d / math.sqrt(1 / m * m + 1)) * (m - 1 / m) def inverseF(m, y, b): return (y - b) / m def intensitySAANS(img, data, reg, threshold=127, r=1, twidth=10,spss=0.1): """ intensitySAANS is the seventh iteration of the intensity function which aims to deal with the plotting of regressed non-orthogonal spectra given in an open image img, the pixel data in data, and a regArray generated using regression(). SAA - spatial antialiasing; Ns - new, scott method Returns a dictionary where key is x value and y value is intensity. r is the step rate along the spectral trace (default to size of one pixel) twidth is the width of the trace on each side of the line y = mx + c so the total will be double spss is the subpixel sampling size """ x1, y1, x2, y2 = img.getbbox() m, c = reg[0:2] n = -1 / m back = backMedian(img, threshold) WIDTH = 3 perpendiculars = [] for p in np.arange(x1, x2, 0.1): # Calculate the corresponding y coordinate to p (called q) on our long axis, #from our regression, where y = mx + b. # (p, q) is a point on our long axis. q = m * p + c # Slope of perpendicular is -1 / m, it's Y intercept is value at f(p). perp_m = -1 / m perp_c = q perpendiculars.append((p, perp_c, parallel(perp_m, perp_c, -WIDTH),\ parallel(perp_m, perp_c, WIDTH))) SAMPLING_FACTOR = 0.50 for x in np.arange(x1, x2, SAMPLING_FACTOR): for y in np.arange(y1, y2, SAMPLING_FACTOR): # For all the perpendiculars to our long axis. for perp in perpendiculars: # Determine if x is between the lines around the perpendicular. if x > inverseF(n, y, perp[2]) and x < inverseF(n, y, perp[3]): pixel = image.getpixel((math.floor(x), math.floor(y))) intensity = (pixel[0] + pixel[1] + pixel[2]) * SAMPLING_FACTOR p = perp[0] # If we have a value for this x value (known as p) on our #long axis, then add it to what we've got. # Remember that the same p value will be picked for #multiple intensities since we are using fractional nx's. 
if p in intensities: intensities[p] = intensities[p] + intensity else: intensities[p] = intensity for graphx in intensities.keys(): a.append(graphx) b.append(intensities[graphx]) x, y = np.array(a), np.array(b) plt.plot(x, y, 'o', label='Original data', markersize=10) plt.show() return None def intensitySAAW(img, data, reg, threshold=127, r=1,\ twidth=10, binwidth=1, spss=0.5, plot=False): """ intensitySAAW is the eighth iteration of the intensity function which aims to deal with the plotting of regressed non-orthogonal spectra given in an open image img, the pixel data in data, and a regArray generated using regression(). SAA - spatial antialiasing; W - np.where method Returns a one dimensional array where the position indicates x value, value of item indicates y value r is the step rate along the spectral trace (default to size of one pixel) twidth is the width of the trace on each side of the line y = mx + c so the total will be double spss is the subpixel sampling size function will not plot sample images by default, change the value of plot to true to do so. """ lowerx, lowery, upperx, uppery = img.getbbox() m, c = reg[0:2] n = -1 / m back = backMedian(img, threshold) xvals = np.arange(lowerx, upperx, spss) #we want processing to begin in the lower-left corner. yvals = np.arange(lowery, uppery, spss) # print("xvals:",xvals) # print("yvals:",yvals) #map generation - 1st dimension in 2d ndarray is y, explaining weird tuple xMap = np.ones((len(yvals),len(xvals))) yMap = np.ones((len(yvals),len(xvals))) ti = time.time() for i in range(len(yvals)): xMap[i,:] = xMap[i,:] * xvals to.pbar(i/len(yvals)) for i in range(len(xvals)): yMap[:,i] = yMap[:,i] * yvals to.pbar(i/len(xvals)) tf = time.time() print("\nmap generation time:", tf-ti) # print("xMap:",xMap) # print("yMap:",yMap) #map pixels in sub-pixel step size to their respective large pixel #i.e. 
1.2, 1.3, 1.9 map to pixels 1, 1, and 2 respectively ti = time.time() xMapInt = xMap.astype(int) yMapInt = yMap.astype(int) tf = time.time() print("map to int:",tf-ti) # print("xMapInt:",xMapInt) # print("yMapInt:",yMapInt) offsetTrace = abs(binwidth * np.sqrt(m**2 + 1) / m) offsetVertical = abs(twidth * np.sqrt(m**2 + 1)) print("offsetTrace",offsetTrace) print("offsetVertical",offsetVertical) binwidthAdjusted = binwidth / np.sqrt(m**2+1) xSize = upperx #I fixed the bin calculation requiremnts (added np.ceil) ti = time.time() pArray = np.zeros(np.ceil((xSize+1-binwidth/2.0)/binwidth)) qArray = np.zeros(np.ceil((xSize+1-binwidth/2.0)/binwidth)) intensities = np.zeros(np.ceil((xSize+1-binwidth/2.0)/binwidth)) tf = time.time() print("zeros matrix generation:", tf-ti) i = 0 for p in np.arange(0.0 + binwidth/2.0, xSize + 1, binwidth): #calculate the y coord of p using our regression y=mx+c q = m * p + c # Slope of perp is -1 / m, Y intercept is the value of the function at p perp_m = -1.0 / m perp_c = q + p/m pArray[i] = p qArray[i] = q # print(i,p,q) offsetHorizontalPositive = (perp_m*xMap + perp_c + offsetTrace) offsetHorizontalNegative = (perp_m*xMap + perp_c - offsetTrace) offsetVerticalPositive = (m*xMap + c + offsetVertical) offsetVerticalNegative = (m*xMap + c - offsetVertical) # print("diffBetween",offsetHorizontalPositive-offsetHorizontalNegative) timeI = time.time() include = np.where((yMap < offsetHorizontalPositive) & \ (yMap >= offsetHorizontalNegative) & \ (yMap < offsetVerticalPositive) & \ (yMap >= offsetVerticalNegative)) timeF = time.time() #print(timeF-timeI) #map sub-pixels back to full pixels # print("include:",include) includedValues = data[[yMapInt[include], xMapInt[include]]] # print("includedValues:",includedValues) #NB! UNLIKE BEFORE, INTENSITIES IS NOT A DICTIONARY, IT IS A 1d ARRAY intensities[i] = np.sum(includedValues) #1d array of our spectra values i += 1 to.pbar(p/(xSize+1)) # print("intensities:\n",intensities) #run plotSamples feeding it required information if plot==True: offsetTuple = (offsetVerticalPositive, offsetVerticalNegative,\ offsetHorizontalPositive, offsetHorizontalNegative) offsetTuple2 = (offsetVertical, offsetTrace) to.plotSamples(img, intensities, reg, offsetTuple2, xMap, yMap) return intensities def sumGenerator(data): """ Creates a 2d matrix of intensity values from a given ndarray data which has values in uint8 RGB form """ new = [] print("creating 2d array from 3d tiff RGB array") pbarCounter = 0 for row in data: rowArray = [] for pixel in row: pixelSum = 0 for value in pixel: pixelSum += value rowArray.append(pixelSum) new.append(rowArray) to.pbar(pbarCounter/len(data)) pbarCounter += 1 to.pbar(1) newNP = np.array(new) return newNP def absResponse(wavelength): """ Would normally have a response function that changes based on the wavelength. In this case, a response function has not been found or created for the Canon 5D Mk I, so we are using a "response function" of 1 across all wavelengths, resulting in no change. """ return 1*wavelength def regression(img, threshold=127): """ Performs least-squares regression fitting on a given image. Returns a tuple: (m,c,xvals_n,yvals_n). 
tuple[0:2] for just (m,c) """ #point-gathering code lowerx, lowery, upperx, uppery = img.getbbox() xvals, yvals = [], [] print("running regression") for x in range(lowerx, upperx): for y in range(lowery, uppery): pixel = img.getpixel((x,y)) if (pixel[0]+pixel[1]+pixel[2]) > threshold: xvals.append(x) yvals.append(y) to.pbar(x/(upperx+1)) #not 100% #regression code xvals_n, yvals_n = np.array(xvals), np.array(yvals) A = np.vstack([xvals_n, np.ones(len(xvals_n))]).T m,c = np.linalg.lstsq(A, yvals_n)[0] to.pbar(1) #100% if v=='yes': print("M, C:", m,c) return (m,c,xvals_n, yvals_n) def cropN(image, threshold,\ manualTop, manualBot, manualLeft, manualRight, margin): """ Crops image data based on pixels falling below or above a certain threshold. This is an updated version of crop which uses the np.where() command. Crops while considering manual overrides for the cropping limits. """ print("cropping image") data = np.array(image) simplifiedData = sumGenerator(data) yAboveThreshold, xAboveThreshold = np.where(simplifiedData > threshold) #setting the bounds of the image to be min and max of where the image has #pertinent data. Also, adds a margin. lowerx, upperx = np.amin(xAboveThreshold), np.amax(xAboveThreshold) lowery, uppery = np.amin(yAboveThreshold,), np.amax(yAboveThreshold) manualTop, manualBot = manualTop, manualBot manualLeft, manualRight = manualLeft, manualRight if v=='yes': print("lx,ux,ly,uy:{0},{1},{2},{3}".format(lowerx,upperx,lowery,uppery)) #making sure we will not go out of bounds for thing in (lowerx, lowery): if not ((thing - margin) < 0): if v=='yes': print("{0} margin clear! incl margin".format(thing)) thing -= margin else: if v=='yes': print("{0} margin not clear! using orig".format(thing)) for thing in (upperx, uppery): if not ((thing + margin) > (len(simplifiedData) - 1)): if v=='yes': print("{0} margin clear! incl margin".format(thing)) thing += margin else: if v=='yes': print("{0} margin not clear! using orig".format(thing)) #let's check to see if we need to override using the manual selection if (lowerx > manualLeft) and (manualLeft != -1): if v=='yes': print("overriding left") lowerx = manualLeft if (upperx < manualRight) and (manualRight != -1): if v=='yes': print("overriding right") upperx = manualRight if (lowery > manualTop) and (manualTop != -1): if v=='yes': print("overriding top") lowery = manualTop if (uppery < manualBot) and (manualBot != -1): if v=='yes': print("overriding bot") uppery = manualBot finalSelection = data[lowery:(uppery+1),lowerx:(upperx+1)] if v=='yes': print("Final selection from {0} to {1} in x, \ from {2} to {3} in y.".format(\ lowerx, upperx, lowery, uppery)) return finalSelection def crop(image,deletionThreshold,autostopTB, autostopBT, autostopRL, autostopLR): """ (deprecated) Crops image based on the number of empty pixels [0,0,0] Crops top-to-bottom, bottom-to-top, right-to-left, and then left-to-right based on the way that the current set of data has been collected. autostops will stop at a specific column if requested. """ duplicate = np.copy(image) #print("duplicate:\n", duplicate) #these get initialized now and updated in each while loop. 
numCol = len(duplicate[0]) #number of columns in image numRow = len(duplicate) #number of rows in image #cropping from top toggleTop = True autostopCounterT = 0 while toggleTop == True: a = 0 counterPerRow = 0 for i in range(numCol): if not (np.sum(duplicate[a][i]) <= deletionThreshold): toggleTop = False break else: counterPerRow += 1 if counterPerRow == len(duplicate[a]): #if the entire row of pixels is empty, delete row duplicate = np.delete(duplicate, a, 0) #print("cropping row:", a) if autostopCounterT == autostopTB: toggleTop = False break autostopCounterT += 1 print("beginning bottom crop, top ran fine") to.pbar(.25) #cropping from bottom toggleBot = True autostopCounterB = 0 while toggleBot == True: numRow = len(duplicate) a = numRow-1 counterPerRow = 0 for i in range(numCol): if not (np.sum(duplicate[a][i]) <= deletionThreshold): toggleBot = False break else: counterPerRow += 1 if counterPerRow == numCol: #if the entire row of pixels is empty, delete row duplicate = np.delete(duplicate, a, 0) #print("cropping row:", a) if autostopCounterB == autostopBT: toggleBot = False break autostopCounterB += 1 print("\nbeginning right->left crop, bottom ran fine") to.pbar(.5) #cropping from right to left toggleRight = True autostopCounterR = 0 while toggleRight == True: numRow = len(duplicate) numCol = len(duplicate[0]) #needs to be updated each time loop iterates a = numCol - 1 counterPerCol = 0 for i in range(numRow): if not (np.sum(duplicate[i][a]) <= deletionThreshold): toggleRight = False break else: counterPerCol += 1 if counterPerCol == numRow: #if the entire col of pixels is empty, delete col duplicate = np.delete(duplicate, a, 1) #print("cropping col:", a) if autostopCounterR == autostopRL: toggleRight = False break autostopCounterR += 1 print("\nbeginning left->right crop, right->left ran fine") to.pbar(.75) #cropping from left to right toggleLeft = True autostopCounterL = 0 while toggleLeft == True: numRow = len(duplicate) numCol = len(duplicate[0]) #needs to be updated each time loop iterates a = 0 counterPerCol = 0 for i in range(numRow): if not (np.sum(duplicate[i][a]) <= deletionThreshold): toggleLeft = False break else: counterPerCol += 1 if autostopCounterL == autostopLR: toggleLeft = False break if counterPerCol == numRow: #if the entire col of pixels is empty, delete col duplicate = np.delete(duplicate, a, 1) #print("cropping col:", a) autostopCounterL += 1 #troubleshooting #print("duplicate shape:", duplicate.shape) #print("duplicate dtype:", duplicate.dtype) print("\n") to.pbar(1) return duplicate def response(aintensities, awavelengths, apulkovo, aexposure): """ Generates a camera response function, based on pulkovo wavelengths. pulkovo should be a file imported from vizier with one star's data in it Returns an array with the appropriate adjustment to make for a wavelength with type [w1, w1.5, ... , wn][adj1, adj2, ... , adjn] exposure time in seconds. #TODO currently only is a one dimensional array with adjustments. 
please fix """ #TODO debugging printing strings print("wavelengths:\n") print(awavelengths) print("intensities:\n") print(aintensities) #TODO DEBUGGING DUMP TO TXT FILE f = open('debug_wavelengths_intensities_pre-interp.txt','w') f.write("#wavelengths intensities\n") for i in range(max(len(awavelengths), len(aintensities))): f.write(str(awavelengths[i])+" "+str(aintensities[i])+"\n") f.close() print("debug file written to debug_wavelengths_intensities_pre-interp.txt") star = np.loadtxt(apulkovo) #turning strings into floats for i in range(len(star)): star[i][0] = float(star[i][0]) star[i][1] = float(star[i][1]) adjustmentArray = [] #Divide by exposure time to get energy / time = power for i in range(len(aintensities)-1): aintensities[i] = aintensities[i]/aexposure #generate blank arrays for filling by next for loop x_star = np.zeros(len(star)) y_star = np.zeros(len(star)) #places pulkovo data into separate arrays for use in interpolation functions for i in range(len(star)): x_star[i] = float(star[i][0]) y_star[i] = float(star[i][1]) #Plotting the pulkovo data. plt.figure(2) plt.clf() plt.plot(x_star, y_star,'o',label='original data',markersize=4) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title("Pulkovo Data from {0}".format(apulkovo)) plt.show() #TODO debugging text file literature/ pulkovo values f = open('debug_literature-pulkovo_pre-interp.txt','w') f.write("#pulkovo_wavelengths pulkovo_intensities\n") for i in range(max(len(x_star),len(y_star))): f.write(str(x_star[i])+" "+str(y_star[i])+"\n") f.close() print("debug file written to debug_literature-pulkovo_pre-interp.txt") #our interpolation function interpFunc = spinterp.interp1d(x_star, y_star, kind="linear",\ fill_value = -1, bounds_error = False) #TODO nearest neighbor is supposed to be temporary. #TODO weird double values in wavelengths? try removing? new_wavelengths = [] for item in range(0,len(awavelengths), 2): new_wavelengths.append(awavelengths[item]) new_wavelengths = np.array(new_wavelengths) interpolatedY = interpFunc(awavelengths) #TODO TEMP TEST #interpolatedY = interpFunc(new_wavelengths) #TODO Debugging print statements #input("wavelengths and interpolatedY to text file, enter to continue") #f = open('wavelengths_interpolatedy.txt','w') #f.write("wavelengths\n") #for item in wavelengths: # f.write(str(wavelengths[item])) # f.write("\n") #f.write("interpolatedY\n") #for item in interpolatedY: # f.write(str(interpolatedY[item])) # f.write("\n") #f.close() #for item in wavelengths: # print(wavelengths[item]) # for item in new_wavelengths: # print(item) # print("interpolatedY") # for item in interpolatedY: # print(item) for nm in range(len(aintensities)):#TODO TEMP TEST CHANGE BACK TO NEW_W # find closest to wavelength in pulkovo # divide value # add to adjustmentArray adjustmentArray.append(interpolatedY[nm]/aintensities[nm]) adjustmentArrayND=np.array(adjustmentArray) #TODO Debugging # for item in adjustmentArrayND: # print(item) return adjustmentArrayND
mit
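The regression() routine in stellar.py fits the spectral trace y = mx + c to the coordinates of above-threshold pixels with np.linalg.lstsq. The same fit on a handful of made-up pixel coordinates (values are illustrative only):

import numpy as np

# Made-up (x, y) coordinates of bright spectrum pixels.
xvals = np.array([10, 20, 30, 40, 50], dtype=float)
yvals = np.array([12, 17, 21, 27, 31], dtype=float)

# Same least-squares setup as regression(): solve [x 1] @ [m, c] ~= y.
A = np.vstack([xvals, np.ones(len(xvals))]).T
m, c = np.linalg.lstsq(A, yvals, rcond=None)[0]
print(m, c)   # slope and intercept of the spectral trace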
srinathv/bokeh
examples/charts/file/stocks_timeseries.py
6
1238
from collections import OrderedDict

import pandas as pd

from bokeh._legacy_charts import TimeSeries, show, output_file

# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
MSFT = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])
IBM = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
    parse_dates=['Date'])

xyvalues = OrderedDict(
    AAPL=AAPL['Adj Close'],
    Date=AAPL['Date'],
    MSFT=MSFT['Adj Close'],
    IBM=IBM['Adj Close'],
)

# any of the following commented are valid Bar inputs
#xyvalues = pd.DataFrame(xyvalues)
#lindex = xyvalues.pop('Date')
#lxyvalues = list(xyvalues.values())
#lxyvalues = np.array(xyvalues.values())

TOOLS = "resize,pan,wheel_zoom,box_zoom,reset,previewsave"

output_file("stocks_timeseries.html")

ts = TimeSeries(
    xyvalues, index='Date', legend=True,
    title="Timeseries", tools=TOOLS, ylabel='Stock Prices')

# usage with iterable index
#ts = TimeSeries(
#    lxyvalues, index=lindex,
#    title="timeseries, pd_input", ylabel='Stock Prices')

show(ts)
bsd-3-clause
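The ichart.yahoo.com endpoints queried above have been retired, so the example no longer runs as written. A sketch of the same legacy TimeSeries call against locally generated data, assuming the constructor accepts the same OrderedDict-of-Series layout built above (the ticker values are synthetic placeholders):

from collections import OrderedDict

import numpy as np
import pandas as pd

from bokeh._legacy_charts import TimeSeries, show, output_file

# Synthetic stand-in for the retired Yahoo Finance CSV downloads above.
rng = np.random.RandomState(0)
dates = pd.Series(pd.date_range('2000-01-03', periods=250, freq='B'))
xyvalues = OrderedDict(
    AAPL=pd.Series(100 + rng.randn(250).cumsum()),
    Date=dates,
    MSFT=pd.Series(80 + rng.randn(250).cumsum()),
    IBM=pd.Series(120 + rng.randn(250).cumsum()),
)

output_file("stocks_timeseries_synthetic.html")

ts = TimeSeries(xyvalues, index='Date', legend=True,
                title="Timeseries", ylabel='Stock Prices')
show(ts)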
tboch/mocpy
mocpy/moc/plot/border.py
1
3261
import numpy as np from astropy.coordinates import ICRS, SkyCoord from astropy.wcs.utils import skycoord_to_pixel from matplotlib.path import Path from matplotlib.patches import PathPatch import cdshealpix from ... import core def border(moc, ax, wcs, **kw_mpl_pathpatch): from .utils import build_plotting_moc moc_to_plot = build_plotting_moc(moc, wcs) if moc_to_plot.empty(): return max_order = moc_to_plot.max_order ipixels_open = core.flatten_pixels(moc_to_plot._interval_set._intervals, moc_to_plot.max_order) # Take the complement if the MOC covers more than half of the sky num_ipixels = 3 << (2*(max_order + 1)) sky_fraction = ipixels_open.shape[0] / float(num_ipixels) if sky_fraction > 0.5: ipixels_all = np.arange(num_ipixels) ipixels_open = np.setdiff1d(ipixels_all, ipixels_open, assume_unique=True) neighbors = cdshealpix.neighbours(ipixels_open, max_order).T # Select the direct neighbors (i.e. those in WEST, NORTH, EAST and SOUTH directions) neighbors = neighbors[[3, 7, 5, 1], :] ipix_moc = np.isin(neighbors, ipixels_open) west_edge = ipix_moc[0, :] south_edge = ipix_moc[1, :] east_edge = ipix_moc[2, :] north_edge = ipix_moc[3, :] num_ipix_moc = ipix_moc.sum(axis=0) ipixels_border_id = (num_ipix_moc < 4) # The border of each HEALPix cells is drawn one at a time path_vertices_l = [] codes = [] west_border = west_edge[ipixels_border_id] south_border = south_edge[ipixels_border_id] east_border = east_edge[ipixels_border_id] north_border = north_edge[ipixels_border_id] ipixels_border = ipixels_open[ipixels_border_id] ipix_lon_boundaries, ipix_lat_boundaries = cdshealpix.vertices(ipixels_border, max_order) ipix_boundaries = SkyCoord(ipix_lon_boundaries, ipix_lat_boundaries, frame=ICRS()) # Projection on the given WCS xp, yp = skycoord_to_pixel(coords=ipix_boundaries, wcs=wcs) from . import culling_backfacing_cells xp, yp, frontface_id = culling_backfacing_cells.backface_culling(xp, yp) west_border = west_border[frontface_id] south_border = south_border[frontface_id] east_border = east_border[frontface_id] north_border = north_border[frontface_id] for i in range(xp.shape[0]): vx = xp[i] vy = yp[i] if not north_border[i]: path_vertices_l += [(vx[0], vy[0]), (vx[1], vy[1]), (0, 0)] codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY] if not east_border[i]: path_vertices_l += [(vx[1], vy[1]), (vx[2], vy[2]), (0, 0)] codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY] if not south_border[i]: path_vertices_l += [(vx[2], vy[2]), (vx[3], vy[3]), (0, 0)] codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY] if not west_border[i]: path_vertices_l += [(vx[3], vy[3]), (vx[0], vy[0]), (0, 0)] codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY] path = Path(path_vertices_l, codes) perimeter_patch = PathPatch(path, **kw_mpl_pathpatch) ax.add_patch(perimeter_patch) from . import axis_viewport axis_viewport.set(ax, wcs)
gpl-3.0
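The border test in border() above comes down to a membership check: a HEALPix cell lies on the MOC border when fewer than four of its direct (W/N/E/S) neighbours are inside the coverage. A toy numpy sketch of that check, with made-up pixel ids standing in for the cdshealpix.neighbours() lookup:

import numpy as np

# Made-up cell ids inside the coverage map; cell 11 is surrounded by the others.
ipixels_open = np.array([5, 10, 11, 12, 17])

# Made-up direct neighbours of each open cell, one column per cell;
# in border() these four rows come from cdshealpix.neighbours(...) instead.
neighbors = np.array([
    [ 4,  9, 10, 11, 16],   # WEST
    [ 1,  4,  5,  6, 11],   # NORTH
    [ 6, 11, 12, 13, 18],   # EAST
    [10, 16, 17, 18, 23],   # SOUTH
])

inside = np.isin(neighbors, ipixels_open)   # which neighbours are open
on_border = inside.sum(axis=0) < 4          # fewer than 4 open neighbours -> border cell
print(ipixels_open[on_border])              # -> [ 5 10 12 17]; cell 11 is interior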
Kazade/NeHe-Website
google_appengine/google/appengine/tools/dev_appserver_import_hook.py
5
50875
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Import hook for dev_appserver.py.""" import dummy_thread import errno import imp import inspect import itertools import locale import logging import mimetypes import os import pickle import random import re import sys import urllib try: import distutils.util except ImportError: pass from google.appengine import dist from google.appengine import dist27 as dist27 from google.appengine.api import appinfo SITE_PACKAGES = os.path.normcase(os.path.join(os.path.dirname(os.__file__), 'site-packages')) import google.appengine SDK_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( google.appengine.__file__))) CODING_COOKIE_RE = re.compile("coding[:=]\s*([-\w.]+)") DEFAULT_ENCODING = 'ascii' def FakeURandom(n): """Fake version of os.urandom.""" bytes = '' for _ in range(n): bytes += chr(random.randint(0, 255)) return bytes def FakeUname(): """Fake version of os.uname.""" return ('Linux', '', '', '', '') def FakeUnlink(path): """Fake version of os.unlink.""" if os.path.isdir(path): raise OSError(errno.ENOENT, "Is a directory", path) else: raise OSError(errno.EPERM, "Operation not permitted", path) def FakeReadlink(path): """Fake version of os.readlink.""" raise OSError(errno.EINVAL, "Invalid argument", path) def FakeAccess(path, mode): """Fake version of os.access where only reads are supported.""" if not os.path.exists(path) or mode != os.R_OK: return False else: return True def FakeSetLocale(category, value=None, original_setlocale=locale.setlocale): """Fake version of locale.setlocale that only supports the default.""" if value not in (None, '', 'C', 'POSIX'): raise locale.Error('locale emulation only supports "C" locale') return original_setlocale(category, 'C') def FakeOpen(filename, flags, mode=0777): """Fake version of os.open.""" raise OSError(errno.EPERM, "Operation not permitted", filename) def FakeRename(src, dst): """Fake version of os.rename.""" raise OSError(errno.EPERM, "Operation not permitted", src) def FakeUTime(path, times): """Fake version of os.utime.""" raise OSError(errno.EPERM, "Operation not permitted", path) def FakeGetPlatform(): """Fake distutils.util.get_platform on OS/X. 
Pass-through otherwise.""" if sys.platform == 'darwin': return 'macosx-' else: return distutils.util.get_platform() def FakeCryptoRandomOSRNGnew(*args, **kwargs): from Crypto.Random.OSRNG import fallback return fallback.new(*args, **kwargs) def NeedsMacOSXProxyFakes(): """Returns True if the MacOS X urllib fakes should be installed.""" return (sys.platform == 'darwin' and (2, 6, 0) <= sys.version_info < (2, 6, 4)) if NeedsMacOSXProxyFakes(): def _FakeProxyBypassHelper(fn, original_module_dict=sys.modules.copy(), original_uname=os.uname): """Setups and restores the state for the Mac OS X urllib fakes.""" def Inner(*args, **kwargs): current_uname = os.uname current_meta_path = sys.meta_path[:] current_modules = sys.modules.copy() try: sys.modules.clear() sys.modules.update(original_module_dict) sys.meta_path[:] = [] os.uname = original_uname return fn(*args, **kwargs) finally: sys.modules.clear() sys.modules.update(current_modules) os.uname = current_uname sys.meta_path[:] = current_meta_path return Inner @_FakeProxyBypassHelper def FakeProxyBypassMacOSXSysconf( host, original_proxy_bypass_macosx_sysconf=urllib.proxy_bypass_macosx_sysconf): """Fake for urllib.proxy_bypass_macosx_sysconf for Python 2.6.0 to 2.6.3.""" return original_proxy_bypass_macosx_sysconf(host) @_FakeProxyBypassHelper def FakeGetProxiesMacOSXSysconf( original_getproxies_macosx_sysconf=urllib.getproxies_macosx_sysconf): """Fake for urllib.getproxies_macosx_sysconf for Python 2.6.0 to 2.6.3.""" return original_getproxies_macosx_sysconf() def IsPathInSubdirectories(filename, subdirectories, normcase=os.path.normcase): """Determines if a filename is contained within one of a set of directories. Args: filename: Path of the file (relative or absolute). subdirectories: Iterable collection of paths to subdirectories which the given filename may be under. normcase: Used for dependency injection. Returns: True if the supplied filename is in one of the given sub-directories or its hierarchy of children. False otherwise. """ file_dir = normcase(os.path.dirname(os.path.abspath(filename))) for parent in subdirectories: fixed_parent = normcase(os.path.abspath(parent)) if os.path.commonprefix([file_dir, fixed_parent]) == fixed_parent: return True return False def GeneratePythonPaths(*p): """Generate all valid filenames for the given file. Args: p: Positional args are the folders to the file and finally the file without a suffix. Returns: A list of strings representing the given path to a file with each valid suffix for this python build. """ suffixes = imp.get_suffixes() return [os.path.join(*p) + s for s, m, t in suffixes] class FakeFile(file): """File sub-class that enforces the security restrictions of the production environment. 
""" ALLOWED_MODES = frozenset(['r', 'rb', 'U', 'rU']) ALLOWED_FILES = set(os.path.normcase(filename) for filename in mimetypes.knownfiles if os.path.isfile(filename)) ALLOWED_FILES_RE = set([re.compile(r'.*/python27.zip$')]) ALLOWED_DIRS = set([ os.path.normcase(os.path.realpath(os.path.dirname(os.__file__))), os.path.normcase(os.path.abspath(os.path.dirname(os.__file__))), os.path.normcase(os.path.dirname(os.path.realpath(os.__file__))), os.path.normcase(os.path.dirname(os.path.abspath(os.__file__))), ]) os_source_location = inspect.getsourcefile(os) if os_source_location is not None: ALLOWED_DIRS.update([ os.path.normcase(os.path.realpath(os.path.dirname(os_source_location))), os.path.normcase(os.path.abspath(os.path.dirname(os_source_location))), os.path.normcase(os.path.dirname(os.path.realpath(os_source_location))), os.path.normcase(os.path.dirname(os.path.abspath(os_source_location))), ]) NOT_ALLOWED_DIRS = set([ SITE_PACKAGES, ]) ALLOWED_SITE_PACKAGE_DIRS = set( os.path.normcase(os.path.abspath(os.path.join(SITE_PACKAGES, path))) for path in [ ]) ALLOWED_SITE_PACKAGE_FILES = set( os.path.normcase(os.path.abspath(os.path.join( os.path.dirname(os.__file__), 'site-packages', path))) for path in itertools.chain(*[ [os.path.join('Crypto')], GeneratePythonPaths('Crypto', '__init__'), GeneratePythonPaths('Crypto', 'pct_warnings'), [os.path.join('Crypto', 'Cipher')], GeneratePythonPaths('Crypto', 'Cipher', '__init__'), GeneratePythonPaths('Crypto', 'Cipher', 'blockalgo'), GeneratePythonPaths('Crypto', 'Cipher', 'AES'), GeneratePythonPaths('Crypto', 'Cipher', 'ARC2'), GeneratePythonPaths('Crypto', 'Cipher', 'ARC4'), GeneratePythonPaths('Crypto', 'Cipher', 'Blowfish'), GeneratePythonPaths('Crypto', 'Cipher', 'CAST'), GeneratePythonPaths('Crypto', 'Cipher', 'DES'), GeneratePythonPaths('Crypto', 'Cipher', 'DES3'), GeneratePythonPaths('Crypto', 'Cipher', 'PKCS1_OAEP'), GeneratePythonPaths('Crypto', 'Cipher', 'PKCS1_v1_5'), GeneratePythonPaths('Crypto', 'Cipher', 'XOR'), [os.path.join('Crypto', 'Hash')], GeneratePythonPaths('Crypto', 'Hash', '__init__'), GeneratePythonPaths('Crypto', 'Hash', 'hashalgo'), GeneratePythonPaths('Crypto', 'Hash', 'HMAC'), GeneratePythonPaths('Crypto', 'Hash', 'MD2'), GeneratePythonPaths('Crypto', 'Hash', 'MD4'), GeneratePythonPaths('Crypto', 'Hash', 'MD5'), GeneratePythonPaths('Crypto', 'Hash', 'SHA'), GeneratePythonPaths('Crypto', 'Hash', 'SHA224'), GeneratePythonPaths('Crypto', 'Hash', 'SHA256'), GeneratePythonPaths('Crypto', 'Hash', 'SHA384'), GeneratePythonPaths('Crypto', 'Hash', 'SHA512'), GeneratePythonPaths('Crypto', 'Hash', 'RIPEMD'), [os.path.join('Crypto', 'Protocol')], GeneratePythonPaths('Crypto', 'Protocol', '__init__'), GeneratePythonPaths('Crypto', 'Protocol', 'AllOrNothing'), GeneratePythonPaths('Crypto', 'Protocol', 'Chaffing'), GeneratePythonPaths('Crypto', 'Protocol', 'KDF'), [os.path.join('Crypto', 'PublicKey')], GeneratePythonPaths('Crypto', 'PublicKey', '__init__'), GeneratePythonPaths('Crypto', 'PublicKey', 'DSA'), GeneratePythonPaths('Crypto', 'PublicKey', '_DSA'), GeneratePythonPaths('Crypto', 'PublicKey', 'ElGamal'), GeneratePythonPaths('Crypto', 'PublicKey', 'RSA'), GeneratePythonPaths('Crypto', 'PublicKey', '_RSA'), GeneratePythonPaths('Crypto', 'PublicKey', 'pubkey'), GeneratePythonPaths('Crypto', 'PublicKey', 'qNEW'), GeneratePythonPaths('Crypto', 'PublicKey', '_slowmath'), [os.path.join('Crypto', 'Random')], GeneratePythonPaths('Crypto', 'Random', '__init__'), GeneratePythonPaths('Crypto', 'Random', 'random'), 
GeneratePythonPaths('Crypto', 'Random', '_UserFriendlyRNG'), [os.path.join('Crypto', 'Random', 'OSRNG')], GeneratePythonPaths('Crypto', 'Random', 'OSRNG', '__init__'), GeneratePythonPaths('Crypto', 'Random', 'OSRNG', 'fallback'), GeneratePythonPaths('Crypto', 'Random', 'OSRNG', 'nt'), GeneratePythonPaths('Crypto', 'Random', 'OSRNG', 'posix'), GeneratePythonPaths('Crypto', 'Random', 'OSRNG', 'rng_base'), [os.path.join('Crypto', 'Random', 'Fortuna')], GeneratePythonPaths('Crypto', 'Random', 'Fortuna', '__init__'), GeneratePythonPaths('Crypto', 'Random', 'Fortuna', 'FortunaAccumulator'), GeneratePythonPaths('Crypto', 'Random', 'Fortuna', 'FortunaGenerator'), GeneratePythonPaths('Crypto', 'Random', 'Fortuna', 'SHAd256'), [os.path.join('Crypto', 'Signature')], GeneratePythonPaths('Crypto', 'Signature', '__init__'), GeneratePythonPaths('Crypto', 'Signature', 'PKCS1_PSS'), GeneratePythonPaths('Crypto', 'Signature', 'PKCS1_v1_5'), [os.path.join('Crypto', 'Util')], GeneratePythonPaths('Crypto', 'Util', '__init__'), GeneratePythonPaths('Crypto', 'Util', 'asn1'), GeneratePythonPaths('Crypto', 'Util', 'Counter'), GeneratePythonPaths('Crypto', 'Util', 'RFC1751'), GeneratePythonPaths('Crypto', 'Util', 'number'), GeneratePythonPaths('Crypto', 'Util', '_number_new'), GeneratePythonPaths('Crypto', 'Util', 'py3compat'), GeneratePythonPaths('Crypto', 'Util', 'python_compat'), GeneratePythonPaths('Crypto', 'Util', 'randpool'), ])) _original_file = file _root_path = None _application_paths = None _skip_files = None _static_file_config_matcher = None _allow_skipped_files = True _availability_cache = {} @staticmethod def SetAllowedPaths(root_path, application_paths): """Configures which paths are allowed to be accessed. Must be called at least once before any file objects are created in the hardened environment. Args: root_path: Absolute path to the root of the application. application_paths: List of additional paths that the application may access, this must include the App Engine runtime but not the Python library directories. """ FakeFile._application_paths = (set(os.path.realpath(path) for path in application_paths) | set(os.path.abspath(path) for path in application_paths)) FakeFile._application_paths.add(root_path) FakeFile._root_path = os.path.join(root_path, '') FakeFile._availability_cache = {} @staticmethod def SetAllowSkippedFiles(allow_skipped_files): """Configures access to files matching FakeFile._skip_files. Args: allow_skipped_files: Boolean whether to allow access to skipped files """ FakeFile._allow_skipped_files = allow_skipped_files FakeFile._availability_cache = {} @staticmethod def SetAllowedModule(name): """Allow the use of a module based on where it is located. Meant to be used by use_library() so that it has a link back into the trusted part of the interpreter. Args: name: Name of the module to allow. """ stream, pathname, description = imp.find_module(name) pathname = os.path.normcase(os.path.abspath(pathname)) if stream: stream.close() FakeFile.ALLOWED_FILES.add(pathname) FakeFile.ALLOWED_FILES.add(os.path.realpath(pathname)) else: assert description[2] == imp.PKG_DIRECTORY if pathname.startswith(SITE_PACKAGES): FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(pathname) FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(os.path.realpath(pathname)) else: FakeFile.ALLOWED_DIRS.add(pathname) FakeFile.ALLOWED_DIRS.add(os.path.realpath(pathname)) @staticmethod def SetSkippedFiles(skip_files): """Sets which files in the application directory are to be ignored. 
Must be called at least once before any file objects are created in the hardened environment. Must be called whenever the configuration was updated. Args: skip_files: Object with .match() method (e.g. compiled regexp). """ FakeFile._skip_files = skip_files FakeFile._availability_cache = {} @staticmethod def SetStaticFileConfigMatcher(static_file_config_matcher): """Sets StaticFileConfigMatcher instance for checking if a file is static. Must be called at least once before any file objects are created in the hardened environment. Must be called whenever the configuration was updated. Args: static_file_config_matcher: StaticFileConfigMatcher instance. """ FakeFile._static_file_config_matcher = static_file_config_matcher FakeFile._availability_cache = {} @staticmethod def IsFileAccessible(filename, normcase=os.path.normcase, py27_optional=False): """Determines if a file's path is accessible. SetAllowedPaths(), SetSkippedFiles() and SetStaticFileConfigMatcher() must be called before this method or else all file accesses will raise an error. Args: filename: Path of the file to check (relative or absolute). May be a directory, in which case access for files inside that directory will be checked. normcase: Used for dependency injection. py27_optional: Whether the filename being checked matches the name of an optional python27 runtime library. Returns: True if the file is accessible, False otherwise. """ logical_filename = normcase(os.path.abspath(filename)) result = FakeFile._availability_cache.get(logical_filename) if result is None: result = FakeFile._IsFileAccessibleNoCache(logical_filename, normcase=normcase, py27_optional=py27_optional) FakeFile._availability_cache[logical_filename] = result return result @staticmethod def _IsFileAccessibleNoCache(logical_filename, normcase=os.path.normcase, py27_optional=False): """Determines if a file's path is accessible. This is an internal part of the IsFileAccessible implementation. Args: logical_filename: Absolute path of the file to check. normcase: Used for dependency injection. py27_optional: Whether the filename being checked matches the name of an optional python27 runtime library. Returns: True if the file is accessible, False otherwise. 
""" logical_dirfakefile = logical_filename is_dir = False if os.path.isdir(logical_filename): logical_dirfakefile = os.path.join(logical_filename, 'foo') is_dir = True if IsPathInSubdirectories(logical_dirfakefile, [FakeFile._root_path], normcase=normcase): relative_filename = logical_dirfakefile[len(FakeFile._root_path):] if not FakeFile._allow_skipped_files: path = relative_filename if is_dir: path = os.path.dirname(path) while path != os.path.dirname(path): if FakeFile._skip_files.match(path): logging.warning('Blocking access to skipped file "%s"', logical_filename) return False path = os.path.dirname(path) if FakeFile._static_file_config_matcher.IsStaticFile(relative_filename): logging.warning('Blocking access to static file "%s"', logical_filename) return False if py27_optional: return True if logical_filename in FakeFile.ALLOWED_FILES: return True for regex in FakeFile.ALLOWED_FILES_RE: match = regex.match(logical_filename) if match and match.end() == len(logical_filename): return True if logical_filename in FakeFile.ALLOWED_SITE_PACKAGE_FILES: return True if IsPathInSubdirectories(logical_dirfakefile, FakeFile.ALLOWED_SITE_PACKAGE_DIRS, normcase=normcase): return True allowed_dirs = FakeFile._application_paths | FakeFile.ALLOWED_DIRS if (IsPathInSubdirectories(logical_dirfakefile, allowed_dirs, normcase=normcase) and not IsPathInSubdirectories(logical_dirfakefile, FakeFile.NOT_ALLOWED_DIRS, normcase=normcase)): return True return False def __init__(self, filename, mode='r', bufsize=-1, **kwargs): """Initializer. See file built-in documentation.""" if mode not in FakeFile.ALLOWED_MODES: raise IOError('invalid mode: %s' % mode) if not FakeFile.IsFileAccessible(filename): raise IOError(errno.EACCES, 'file not accessible', filename) super(FakeFile, self).__init__(filename, mode, bufsize, **kwargs) dist._library.SetAllowedModule = FakeFile.SetAllowedModule class RestrictedPathFunction(object): """Enforces access restrictions for functions that have a file or directory path as their first argument.""" _original_os = os def __init__(self, original_func): """Initializer. Args: original_func: Callable that takes as its first argument the path to a file or directory on disk; all subsequent arguments may be variable. """ self._original_func = original_func def __call__(self, path, *args, **kwargs): """Enforces access permissions for the function passed to the constructor. """ if not FakeFile.IsFileAccessible(path): raise OSError(errno.EACCES, 'path not accessible', path) return self._original_func(path, *args, **kwargs) def GetSubmoduleName(fullname): """Determines the leaf submodule name of a full module name. Args: fullname: Fully qualified module name, e.g. 'foo.bar.baz' Returns: Submodule name, e.g. 'baz'. If the supplied module has no submodule (e.g., 'stuff'), the returned value will just be that module name ('stuff'). """ return fullname.rsplit('.', 1)[-1] class CouldNotFindModuleError(ImportError): """Raised when a module could not be found. In contrast to when a module has been found, but cannot be loaded because of hardening restrictions. """ class Py27OptionalModuleError(ImportError): """Raised for error conditions relating to optional Python 2.7 modules.""" def Trace(func): """Call stack logging decorator for HardenedModulesHook class. This decorator logs the call stack of the HardenedModulesHook class as it executes, indenting logging messages based on the current stack depth. Args: func: the function to decorate. Returns: The decorated function. 
""" def Decorate(self, *args, **kwargs): args_to_show = [] if args is not None: args_to_show.extend(str(argument) for argument in args) if kwargs is not None: args_to_show.extend('%s=%s' % (key, value) for key, value in kwargs.iteritems()) args_string = ', '.join(args_to_show) self.log('Entering %s(%s)', func.func_name, args_string) self._indent_level += 1 try: return func(self, *args, **kwargs) finally: self._indent_level -= 1 self.log('Exiting %s(%s)', func.func_name, args_string) return Decorate class HardenedModulesHook(object): """Meta import hook that restricts the modules used by applications to match the production environment. Module controls supported: - Disallow native/extension modules from being loaded - Disallow built-in and/or Python-distributed modules from being loaded - Replace modules with completely empty modules - Override specific module attributes - Replace one module with another After creation, this object should be added to the front of the sys.meta_path list (which may need to be created). The sys.path_importer_cache dictionary should also be cleared, to prevent loading any non-restricted modules. See PEP302 for more info on how this works: http://www.python.org/dev/peps/pep-0302/ """ ENABLE_LOGGING = False def log(self, message, *args): """Logs an import-related message to stderr, with indentation based on current call-stack depth. Args: message: Logging format string. args: Positional format parameters for the logging message. """ if HardenedModulesHook.ENABLE_LOGGING: indent = self._indent_level * ' ' print >>sys.__stderr__, indent + (message % args) _WHITE_LIST_C_MODULES = [ 'py_streamhtmlparser', 'AES', 'ARC2', 'ARC4', 'Blowfish', 'CAST', 'DES', 'DES3', 'MD2', 'MD4', 'RIPEMD', 'RIPEMD160', 'SHA256', 'XOR', '_AES', '_ARC2', '_ARC4', '_Blowfish', '_CAST', '_DES', '_DES3', '_MD2', '_MD4', '_RIPEMD160', '_SHA224', '_SHA256', '_SHA384', '_SHA512', '_XOR', '_Crypto_Cipher__AES', '_Crypto_Cipher__ARC2', '_Crypto_Cipher__ARC4', '_Crypto_Cipher__Blowfish', '_Crypto_Cipher__CAST', '_Crypto_Cipher__DES', '_Crypto_Cipher__DES3', '_Crypto_Cipher__XOR', '_Crypto_Hash__MD2', '_Crypto_Hash__MD4', '_Crypto_Hash__RIPEMD', '_Crypto_Hash__SHA256', 'array', 'binascii', 'bz2', 'cmath', 'collections', 'crypt', 'cStringIO', 'datetime', 'errno', 'exceptions', 'gc', 'itertools', 'math', 'md5', 'operator', 'posix', 'posixpath', 'pyexpat', 'sha', 'struct', 'strxor', 'sys', 'time', 'timing', 'unicodedata', 'zlib', '_ast', '_bisect', '_codecs', '_codecs_cn', '_codecs_hk', '_codecs_iso2022', '_codecs_jp', '_codecs_kr', '_codecs_tw', '_collections', '_counter', '_csv', '_elementtree', '_fastmath', '_functools', '_hashlib', '_heapq', '_io', '_locale', '_lsprof', '_md5', '_multibytecodec', '_scproxy', '_random', '_sha', '_sha256', '_sha512', '_sre', '_struct', '_types', '_weakref', '__main__', ] _PY27_ALLOWED_MODULES = [ '_bytesio', '_fileio', '_json', '_symtable', '_yaml', 'parser', 'strop', ] __PY27_OPTIONAL_ALLOWED_MODULES = { 'django': [], 'endpoints': [], 'jinja2': ['_debugsupport', '_speedups'], 'lxml': ['etree', 'objectify'], 'markupsafe': ['_speedups'], 'matplotlib': [ 'ft2font', 'ttconv', '_png', '_backend_agg', '_path', '_image', '_cntr', 'nxutils', '_delaunay', '_tri', ], 'numpy': [ '_capi', '_compiled_base', '_dotblas', 'fftpack_lite', 'lapack_lite', 'mtrand', 'multiarray', 'scalarmath', '_sort', 'umath', 'umath_tests', ], 'PIL': ['_imaging', '_imagingcms', '_imagingft', '_imagingmath'], 'setuptools': [], } __CRYPTO_CIPHER_ALLOWED_MODULES = [ 'MODE_CBC', 'MODE_CFB', 'MODE_CTR', 
'MODE_ECB', 'MODE_OFB', 'block_size', 'key_size', 'new', ] _WHITE_LIST_PARTIAL_MODULES = { 'Crypto.Cipher.AES': __CRYPTO_CIPHER_ALLOWED_MODULES, 'Crypto.Cipher.ARC2': __CRYPTO_CIPHER_ALLOWED_MODULES, 'Crypto.Cipher.Blowfish': __CRYPTO_CIPHER_ALLOWED_MODULES, 'Crypto.Cipher.CAST': __CRYPTO_CIPHER_ALLOWED_MODULES, 'Crypto.Cipher.DES': __CRYPTO_CIPHER_ALLOWED_MODULES, 'Crypto.Cipher.DES3': __CRYPTO_CIPHER_ALLOWED_MODULES, 'gc': [ 'enable', 'disable', 'isenabled', 'collect', 'get_debug', 'set_threshold', 'get_threshold', 'get_count' ], 'os': [ 'access', 'altsep', 'curdir', 'defpath', 'devnull', 'environ', 'error', 'extsep', 'EX_NOHOST', 'EX_NOINPUT', 'EX_NOPERM', 'EX_NOUSER', 'EX_OK', 'EX_OSERR', 'EX_OSFILE', 'EX_PROTOCOL', 'EX_SOFTWARE', 'EX_TEMPFAIL', 'EX_UNAVAILABLE', 'EX_USAGE', 'F_OK', 'getcwd', 'getcwdu', 'getenv', 'listdir', 'lstat', 'name', 'NGROUPS_MAX', 'O_APPEND', 'O_CREAT', 'O_DIRECT', 'O_DIRECTORY', 'O_DSYNC', 'O_EXCL', 'O_LARGEFILE', 'O_NDELAY', 'O_NOCTTY', 'O_NOFOLLOW', 'O_NONBLOCK', 'O_RDONLY', 'O_RDWR', 'O_RSYNC', 'O_SYNC', 'O_TRUNC', 'O_WRONLY', 'open', 'pardir', 'path', 'pathsep', 'R_OK', 'readlink', 'remove', 'rename', 'SEEK_CUR', 'SEEK_END', 'SEEK_SET', 'sep', 'stat', 'stat_float_times', 'stat_result', 'strerror', 'TMP_MAX', 'unlink', 'urandom', 'utime', 'walk', 'WCOREDUMP', 'WEXITSTATUS', 'WIFEXITED', 'WIFSIGNALED', 'WIFSTOPPED', 'WNOHANG', 'WSTOPSIG', 'WTERMSIG', 'WUNTRACED', 'W_OK', 'X_OK', '_get_exports_list', ], 'signal': [ ], 'ssl': [ ], } _MODULE_OVERRIDES = { 'locale': { 'setlocale': FakeSetLocale, }, 'os': { 'access': FakeAccess, 'listdir': RestrictedPathFunction(os.listdir), 'lstat': RestrictedPathFunction(os.stat), 'open': FakeOpen, 'readlink': FakeReadlink, 'remove': FakeUnlink, 'rename': FakeRename, 'stat': RestrictedPathFunction(os.stat), 'uname': FakeUname, 'unlink': FakeUnlink, 'urandom': FakeURandom, 'utime': FakeUTime, }, 'signal': { '__doc__': None, }, 'distutils.util': { 'get_platform': FakeGetPlatform, }, 'Crypto.Random.OSRNG': { 'new': FakeCryptoRandomOSRNGnew, }, } _ENABLED_FILE_TYPES = ( imp.PKG_DIRECTORY, imp.PY_SOURCE, imp.PY_COMPILED, imp.C_BUILTIN, ) def __init__(self, config, module_dict, app_code_path, imp_module=imp, os_module=os, dummy_thread_module=dummy_thread, pickle_module=pickle): """Initializer. Args: config: AppInfoExternal instance representing the parsed app.yaml file. module_dict: Module dictionary to use for managing system modules. Should be sys.modules. app_code_path: The absolute path to the application code on disk. imp_module, os_module, dummy_thread_module, etc.: References to modules that exist in the dev_appserver that must be used by this class in order to function, even if these modules have been unloaded from sys.modules. 
""" self._config = config self._module_dict = module_dict self._imp = imp_module self._os = os_module self._dummy_thread = dummy_thread_module self._pickle = pickle self._indent_level = 0 self._app_code_path = app_code_path self._white_list_c_modules = list(self._WHITE_LIST_C_MODULES) self._white_list_partial_modules = dict(self._WHITE_LIST_PARTIAL_MODULES) self._enabled_modules = [] if self._config and self._config.runtime == 'python27': self._white_list_c_modules.extend(self._PY27_ALLOWED_MODULES) self._white_list_partial_modules['os'] = ( list(self._white_list_partial_modules['os']) + ['getpid', 'getuid', 'sys']) for k in self._white_list_partial_modules.keys(): if k.startswith('Crypto'): del self._white_list_partial_modules[k] webob_path = os.path.join(SDK_ROOT, 'lib', 'webob-1.1.1') if webob_path not in sys.path: sys.path.insert(1, webob_path) for libentry in self._config.GetAllLibraries(): self._enabled_modules.append(libentry.name) extra = self.__PY27_OPTIONAL_ALLOWED_MODULES.get(libentry.name) logging.debug('Enabling %s: %r', libentry.name, extra) if extra: self._white_list_c_modules.extend(extra) if libentry.name == 'django': if 'django' not in self._module_dict: version = libentry.version if version == 'latest': django_library = appinfo._NAME_TO_SUPPORTED_LIBRARY['django'] version = django_library.non_deprecated_versions[-1] if google.__name__.endswith('3'): try: __import__('django.v' + version.replace('.', '_')) continue except ImportError: sys.modules.pop('django', None) sitedir = os.path.join(SDK_ROOT, 'lib', 'django-%s' % version) if os.path.isdir(sitedir): logging.debug('Enabling Django version %s at %s', version, sitedir) sys.path[:] = [dirname for dirname in sys.path if not dirname.startswith(os.path.join( SDK_ROOT, 'lib', 'django'))] sys.path.insert(1, sitedir) else: logging.warn('Enabling Django version %s (no directory found)', version) elif libentry.name == 'endpoints': try: from google.third_party.apphosting.python.endpoints import v1_0 sys.path.append(os.path.dirname(v1_0.__file__)) del v1_0 except ImportError: endpoints_path = os.path.join(SDK_ROOT, 'lib', 'endpoints-1.0') if endpoints_path not in sys.path: sys.path.append(endpoints_path) @Trace def find_module(self, fullname, path=None): """See PEP 302.""" if fullname in ('cPickle', 'thread'): return self search_path = path all_modules = fullname.split('.') try: for index, current_module in enumerate(all_modules): current_module_fullname = '.'.join(all_modules[:index + 1]) if (current_module_fullname == fullname and not self.StubModuleExists(fullname)): self.FindModuleRestricted(current_module, current_module_fullname, search_path) else: if current_module_fullname in self._module_dict: module = self._module_dict[current_module_fullname] else: module = self.FindAndLoadModule(current_module, current_module_fullname, search_path) if hasattr(module, '__path__'): search_path = module.__path__ except CouldNotFindModuleError: return None except Py27OptionalModuleError, err: logging.error(err) raise return self def StubModuleExists(self, name): """Check if the named module has a stub replacement.""" if name in sys.builtin_module_names: name = 'py_%s' % name if self._config and self._config.runtime == 'python27': if name in dist27.MODULE_OVERRIDES: return True else: if name in dist.__all__: return True return False def ImportStubModule(self, name): """Import the stub module replacement for the specified module.""" if name in sys.builtin_module_names: name = 'py_%s' % name providing_dist = dist if self._config and 
self._config.runtime == 'python27': if name in dist27.__all__: providing_dist = dist27 fullname = '%s.%s' % (providing_dist.__name__, name) __import__(fullname, {}, {}) return sys.modules[fullname] @Trace def FixModule(self, module): """Prunes and overrides restricted module attributes. Args: module: The module to prune. This should be a new module whose attributes reference back to the real module's __dict__ members. """ if module.__name__ in self._white_list_partial_modules: allowed_symbols = self._white_list_partial_modules[module.__name__] for symbol in set(module.__dict__) - set(allowed_symbols): if not (symbol.startswith('__') and symbol.endswith('__')): del module.__dict__[symbol] if module.__name__ in self._MODULE_OVERRIDES: module.__dict__.update(self._MODULE_OVERRIDES[module.__name__]) if module.__name__ == 'urllib' and NeedsMacOSXProxyFakes(): module.__dict__.update( {'proxy_bypass_macosx_sysconf': FakeProxyBypassMacOSXSysconf, 'getproxies_macosx_sysconf': FakeGetProxiesMacOSXSysconf}) @Trace def FindModuleRestricted(self, submodule, submodule_fullname, search_path): """Locates a module while enforcing module import restrictions. Args: submodule: The short name of the submodule (i.e., the last section of the fullname; for 'foo.bar' this would be 'bar'). submodule_fullname: The fully qualified name of the module to find (e.g., 'foo.bar'). search_path: List of paths to search for to find this module. Should be None if the current sys.path should be used. Returns: Tuple (source_file, pathname, description) where: source_file: File-like object that contains the module; in the case of packages, this will be None, which implies to look at __init__.py. pathname: String containing the full path of the module on disk. description: Tuple returned by imp.find_module(). However, in the case of an import using a path hook (e.g. a zipfile), source_file will be a PEP-302-style loader object, pathname will be None, and description will be a tuple filled with None values. Raises: ImportError exception if the requested module was found, but importing it is disallowed. CouldNotFindModuleError exception if the request module could not even be found for import. 
""" if search_path is None: search_path = [None] + sys.path py27_optional = False py27_enabled = False topmodule = None if self._config and self._config.runtime == 'python27': topmodule = submodule_fullname.split('.')[0] if topmodule in self.__PY27_OPTIONAL_ALLOWED_MODULES: py27_optional = True py27_enabled = topmodule in self._enabled_modules elif topmodule == 'Crypto': py27_optional = True py27_enabled = 'pycrypto' in self._enabled_modules import_error = None for path_entry in search_path: result = self.FindPathHook(submodule, submodule_fullname, path_entry) if result is not None: source_file, pathname, description = result if description == (None, None, None): return result suffix, mode, file_type = description try: if (file_type not in (self._imp.C_BUILTIN, self._imp.C_EXTENSION)): pkg_pathname = pathname if file_type == self._imp.PKG_DIRECTORY: pkg_pathname = os.path.join(pkg_pathname, '__init__.py') if not FakeFile.IsFileAccessible( pkg_pathname, py27_optional=py27_optional): error_message = 'Access to module file denied: %s' % pathname logging.debug(error_message) raise ImportError(error_message) if (file_type not in self._ENABLED_FILE_TYPES and submodule not in self._white_list_c_modules): error_message = ('Could not import "%s": Disallowed C-extension ' 'or built-in module' % submodule_fullname) logging.debug(error_message) raise ImportError(error_message) if (py27_optional and not py27_enabled and not pathname.startswith(self._app_code_path)): error_message = ('Third party package %s not enabled.' % topmodule) logging.debug(error_message) raise ImportError(error_message) return source_file, pathname, description except ImportError, e: import_error = e if py27_optional and submodule_fullname == topmodule: if py27_enabled: msg = ('Third party package %s was enabled in app.yaml ' 'but not found on import. You may have to download ' 'and install it.' % topmodule) else: msg = ('Third party package %s must be included in the ' '"libraries:" clause of your app.yaml file ' 'in order to be imported.' % topmodule) logging.debug(msg) raise Py27OptionalModuleError(msg) if import_error: raise import_error self.log('Could not find module "%s"', submodule_fullname) raise CouldNotFindModuleError() def FindPathHook(self, submodule, submodule_fullname, path_entry): """Helper for FindModuleRestricted to find a module in a sys.path entry. Args: submodule: submodule_fullname: path_entry: A single sys.path entry, or None representing the builtins. Returns: Either None (if nothing was found), or a triple (source_file, path_name, description). See the doc string for FindModuleRestricted() for the meaning of the latter. 
""" if path_entry is None: if submodule_fullname in sys.builtin_module_names: try: result = self._imp.find_module(submodule) except ImportError: pass else: source_file, pathname, description = result suffix, mode, file_type = description if file_type == self._imp.C_BUILTIN: return result return None if path_entry in sys.path_importer_cache: importer = sys.path_importer_cache[path_entry] else: importer = None for hook in sys.path_hooks: try: importer = hook(path_entry) break except ImportError: pass sys.path_importer_cache[path_entry] = importer if importer is None: try: return self._imp.find_module(submodule, [path_entry]) except ImportError: pass else: loader = importer.find_module(submodule_fullname) if loader is not None: return (loader, None, (None, None, None)) return None @Trace def LoadModuleRestricted(self, submodule_fullname, source_file, pathname, description): """Loads a module while enforcing module import restrictions. As a byproduct, the new module will be added to the module dictionary. Args: submodule_fullname: The fully qualified name of the module to find (e.g., 'foo.bar'). source_file: File-like object that contains the module's source code, or a PEP-302-style loader object. pathname: String containing the full path of the module on disk. description: Tuple returned by imp.find_module(), or (None, None, None) in case source_file is a PEP-302-style loader object. Returns: The new module. Raises: ImportError exception of the specified module could not be loaded for whatever reason. """ if description == (None, None, None): return source_file.load_module(submodule_fullname) try: try: return self._imp.load_module(submodule_fullname, source_file, pathname, description) except: if submodule_fullname in self._module_dict: del self._module_dict[submodule_fullname] raise finally: if source_file is not None: source_file.close() @Trace def FindAndLoadModule(self, submodule, submodule_fullname, search_path): """Finds and loads a module, loads it, and adds it to the module dictionary. Args: submodule: Name of the module to import (e.g., baz). submodule_fullname: Full name of the module to import (e.g., foo.bar.baz). search_path: Path to use for searching for this submodule. For top-level modules this should be None; otherwise it should be the __path__ attribute from the parent package. Returns: A new module instance that has been inserted into the module dictionary supplied to __init__. Raises: ImportError exception if the module could not be loaded for whatever reason (e.g., missing, not allowed). 
""" module = self._imp.new_module(submodule_fullname) if submodule_fullname == 'thread': module.__dict__.update(self._dummy_thread.__dict__) module.__name__ = 'thread' elif submodule_fullname == 'cPickle': module.__dict__.update(self._pickle.__dict__) module.__name__ = 'cPickle' elif submodule_fullname == 'os': module.__dict__.update(self._os.__dict__) elif submodule_fullname == 'ssl': pass elif self.StubModuleExists(submodule_fullname): module = self.ImportStubModule(submodule_fullname) else: source_file, pathname, description = self.FindModuleRestricted(submodule, submodule_fullname, search_path) module = self.LoadModuleRestricted(submodule_fullname, source_file, pathname, description) if (getattr(module, '__path__', None) is not None and search_path != self._app_code_path): try: app_search_path = os.path.join(self._app_code_path, *(submodule_fullname.split('.')[:-1])) source_file, pathname, description = self.FindModuleRestricted(submodule, submodule_fullname, [app_search_path]) module.__path__.append(pathname) except ImportError, e: pass module.__loader__ = self self.FixModule(module) if submodule_fullname not in self._module_dict: self._module_dict[submodule_fullname] = module if submodule_fullname != submodule: parent_module = self._module_dict.get( submodule_fullname[:-len(submodule) - 1]) if parent_module and not hasattr(parent_module, submodule): setattr(parent_module, submodule, module) if submodule_fullname == 'os': os_path_name = module.path.__name__ os_path = self.FindAndLoadModule(os_path_name, os_path_name, search_path) self._module_dict['os.path'] = os_path module.__dict__['path'] = os_path return module @Trace def GetParentPackage(self, fullname): """Retrieves the parent package of a fully qualified module name. Args: fullname: Full name of the module whose parent should be retrieved (e.g., foo.bar). Returns: Module instance for the parent or None if there is no parent module. Raise: ImportError exception if the module's parent could not be found. """ all_modules = fullname.split('.') parent_module_fullname = '.'.join(all_modules[:-1]) if parent_module_fullname: if self.find_module(fullname) is None: raise ImportError('Could not find module %s' % fullname) return self._module_dict[parent_module_fullname] return None @Trace def GetParentSearchPath(self, fullname): """Determines the search path of a module's parent package. Args: fullname: Full name of the module to look up (e.g., foo.bar). Returns: Tuple (submodule, search_path) where: submodule: The last portion of the module name from fullname (e.g., if fullname is foo.bar, then this is bar). search_path: List of paths that belong to the parent package's search path or None if there is no parent package. Raises: ImportError exception if the module or its parent could not be found. """ submodule = GetSubmoduleName(fullname) parent_package = self.GetParentPackage(fullname) search_path = None if parent_package is not None and hasattr(parent_package, '__path__'): search_path = parent_package.__path__ return submodule, search_path @Trace def GetModuleInfo(self, fullname): """Determines the path on disk and the search path of a module or package. Args: fullname: Full name of the module to look up (e.g., foo.bar). Returns: Tuple (pathname, search_path, submodule) where: pathname: String containing the full path of the module on disk, or None if the module wasn't loaded from disk (e.g. from a zipfile). search_path: List of paths that belong to the found package's search path or None if found module is not a package. 
submodule: The relative name of the submodule that's being imported. """ submodule, search_path = self.GetParentSearchPath(fullname) source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path) suffix, mode, file_type = description module_search_path = None if file_type == self._imp.PKG_DIRECTORY: module_search_path = [pathname] pathname = os.path.join(pathname, '__init__%spy' % os.extsep) return pathname, module_search_path, submodule @Trace def load_module(self, fullname): """See PEP 302.""" all_modules = fullname.split('.') submodule = all_modules[-1] parent_module_fullname = '.'.join(all_modules[:-1]) search_path = None if parent_module_fullname and parent_module_fullname in self._module_dict: parent_module = self._module_dict[parent_module_fullname] if hasattr(parent_module, '__path__'): search_path = parent_module.__path__ return self.FindAndLoadModule(submodule, fullname, search_path) @Trace def is_package(self, fullname): """See PEP 302 extensions.""" submodule, search_path = self.GetParentSearchPath(fullname) source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path) suffix, mode, file_type = description if file_type == self._imp.PKG_DIRECTORY: return True return False @Trace def get_source(self, fullname): """See PEP 302 extensions.""" full_path, search_path, submodule = self.GetModuleInfo(fullname) if full_path is None: return None source_file = open(full_path) try: return source_file.read() finally: source_file.close() @Trace def get_code(self, fullname): """See PEP 302 extensions.""" full_path, search_path, submodule = self.GetModuleInfo(fullname) if full_path is None: return None source_file = open(full_path) try: source_code = source_file.read() finally: source_file.close() source_code = source_code.replace('\r\n', '\n') if not source_code.endswith('\n'): source_code += '\n' encoding = DEFAULT_ENCODING for line in source_code.split('\n', 2)[:2]: matches = CODING_COOKIE_RE.findall(line) if matches: encoding = matches[0].lower() source_code.decode(encoding) return compile(source_code, full_path, 'exec')
bsd-3-clause
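The HardenedModulesHook class documented above is installed at the front of sys.meta_path and relies on the legacy PEP 302 finder/loader protocol (find_module / load_module). As a point of reference only, here is a minimal, hypothetical sketch of that protocol applied to a module whitelist; it is not the App Engine implementation, and the class and variable names are illustrative:

import sys


class WhitelistImportHook(object):
    """Blocks imports of any top-level module that is not explicitly allowed."""

    def __init__(self, allowed_modules):
        self._allowed = set(allowed_modules)

    def find_module(self, fullname, path=None):
        # Returning None defers to the normal import machinery, so allowed
        # modules load as usual; returning self claims the import so that
        # load_module() below can reject it.
        if fullname.split('.')[0] in self._allowed:
            return None
        return self

    def load_module(self, fullname):
        raise ImportError('module %r is not allowed in this sandbox' % fullname)


# As the HardenedModulesHook docstring describes, the hook goes at the front
# of sys.meta_path and the path importer cache is cleared afterwards.
sys.meta_path.insert(0, WhitelistImportHook(['sys', 'os', 'math']))
sys.path_importer_cache.clear()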
neurobiofisica/IntelSBESC2014
sw/ProjetoUserSpace/Plotter.py
1
2171
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np


class PlotISI:
    def __init__(self, spk_num=1, markersize=5, linewidth=2):
        """Plots the ISI of the arriving data."""
        self.ms = markersize
        self.lw = linewidth
        self.fig = plt.figure(1)
        self.ax = []
        self.line = []
        # One subplot (and one line object) per channel, in a 4x2 grid.
        for i in range(8):
            self.ax.append(self.fig.add_subplot(4, 2, i + 1))
            self.line.append(self.ax[-1].plot([], [], linestyle='solid', marker='o',
                                              color="#000000", markersize=self.ms,
                                              linewidth=self.lw)[0])
        self.first_ts = []
        self.timestamps = []
        self.isi = []
        self.ts_now = []
        for i in range(8):
            self.first_ts.append(0)
            self.timestamps.append([])
            self.isi.append([])
            self.ts_now.append(None)
        self.ymin = 0
        self.ymax = 2  # s
        #self.ax.set_title('Inter Spike Intervals')
        #self.ax.set_xlabel('time (s)')
        #self.ax.set_ylabel('Inter Spike Interval (ms)')
        self.fig.show()

    # TODO: Make a subplot for each of the channels
    def process(self, flags, timestamp):
        # Each set bit in `flags` marks a channel with a new spike at `timestamp`.
        for i in range(8):
            if ((1 << i) & flags) != 0:
                if self.ts_now[i] is None:
                    # Register the first timestamp
                    self.first_ts[i] = timestamp
                    self.ts_now[i] = 0.
                    self.timestamps[i].append(0.)
                else:
                    self.isi[i].append((timestamp - self.first_ts[i]) - self.ts_now[i])
                    self.timestamps[i].append(timestamp - self.first_ts[i])
                    self.ts_now[i] = timestamp - self.first_ts[i]
                # Sets the new data
                self.line[i].set_xdata(self.timestamps[i][1:])
                self.line[i].set_ydata(self.isi[i])
                # Adjust axes
                xmax = self.timestamps[i][-1]
                xmin = xmax - 30
                self.ax[i].axis([xmin, xmax, self.ymin, self.ymax])
        # Draw figure
        self.fig.canvas.draw()
mit
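The PlotISI class in Plotter.py above is driven by repeated calls to process(flags, timestamp). A minimal, hypothetical driver is sketched below; the import path and the synthetic spike generator are assumptions (the real project feeds timestamps from its acquisition pipeline), and the sketch only illustrates the expected calling convention: flags is an 8-bit channel mask and timestamp is a time in seconds.

import random

from Plotter import PlotISI  # assumed import path for the class above

plotter = PlotISI(markersize=4, linewidth=1)

t = 0.0
for _ in range(200):
    # Hypothetical spike stream: roughly five events per second, each on a
    # single randomly chosen channel.
    t += random.expovariate(5.0)
    flags = 1 << random.randrange(8)
    plotter.process(flags, t)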
plissonf/scikit-learn
sklearn/metrics/classification.py
95
67713
"""Metrics to assess performance on classification task given classe prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck <[email protected]> # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # Jatin Shah <[email protected]> # Saurabh Jha <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy.spatial.distance import hamming as sp_hamming from ..preprocessing import LabelBinarizer, label_binarize from ..preprocessing import LabelEncoder from ..utils import check_array from ..utils import check_consistent_length from ..utils import column_or_1d from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples from ..utils.sparsefuncs import count_nonzero from ..utils.fixes import bincount from .base import UndefinedMetricWarning def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target`` y_true : array or indicator matrix y_pred : array or indicator matrix """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true) type_pred = type_of_target(y_pred) y_type = set([type_true, type_pred]) if y_type == set(["binary", "multiclass"]): y_type = set(["multiclass"]) if len(y_type) > 1: raise ValueError("Can't handle mix of {0} and {1}" "".format(type_true, type_pred)) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if (y_type not in ["binary", "multiclass", "multilabel-indicator"]): raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' return y_type, y_true, y_pred def _weighted_sum(sample_score, sample_weight, normalize=False): if normalize: return np.average(sample_score, weights=sample_weight) elif sample_weight is not None: return np.dot(sample_score, sample_weight) else: return sample_score.sum() def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the correctly classified samples (float), else it returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_similarity_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_similarity_score`` function. Examples -------- >>> import numpy as np >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def confusion_matrix(y_true, y_pred, labels=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` but predicted to be in group :math:`j`. Read more in the :ref:`User Guide <confusion_matrix>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. Returns ------- C : array, shape = [n_classes, n_classes] Confusion matrix References ---------- .. 
[1] `Wikipedia entry for the Confusion matrix <http://en.wikipedia.org/wiki/Confusion_matrix>`_ Examples -------- >>> from sklearn.metrics import confusion_matrix >>> y_true = [2, 0, 2, 2, 0, 1] >>> y_pred = [0, 0, 2, 2, 0, 2] >>> confusion_matrix(y_true, y_pred) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"]) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type not in ("binary", "multiclass"): raise ValueError("%s is not supported" % y_type) if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)), shape=(n_labels, n_labels) ).toarray() return CM def cohen_kappa_score(y1, y2, labels=None): """Cohen's kappa: a statistic that measures inter-annotator agreement. This function computes Cohen's kappa [1], a score that expresses the level of agreement between two annotators on a classification problem. It is defined as .. math:: \kappa = (p_o - p_e) / (1 - p_e) where :math:`p_o` is the empirical probability of agreement on the label assigned to any sample (the observed agreement ratio), and :math:`p_e` is the expected agreement when both annotators assign labels randomly. :math:`p_e` is estimated using a per-annotator empirical prior over the class labels [2]. Parameters ---------- y1 : array, shape = [n_samples] Labels assigned by the first annotator. y2 : array, shape = [n_samples] Labels assigned by the second annotator. The kappa statistic is symmetric, so swapping ``y1`` and ``y2`` doesn't change the value. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to select a subset of labels. If None, all labels that appear at least once in ``y1`` or ``y2`` are used. Returns ------- kappa : float The kappa statistic, which is a number between -1 and 1. The maximum value means complete agreement; zero or lower means chance agreement. References ---------- .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104. .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for computational linguistics". Computational Linguistic 34(4):555-596. """ confusion = confusion_matrix(y1, y2, labels=labels) P = confusion / float(confusion.sum()) p_observed = np.trace(P) p_expected = np.dot(P.sum(axis=0), P.sum(axis=1)) return (p_observed - p_expected) / (1 - p_expected) def jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None): """Jaccard similarity coefficient score The Jaccard index [1], or Jaccard similarity coefficient, defined as the size of the intersection divided by the size of the union of two label sets, is used to compare set of predicted labels for a sample to the corresponding set of labels in ``y_true``. Read more in the :ref:`User Guide <jaccard_similarity_score>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the sum of the Jaccard similarity coefficient over the sample set. Otherwise, return the average of Jaccard similarity coefficient. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the average Jaccard similarity coefficient, else it returns the sum of the Jaccard similarity coefficient over the sample set. The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equivalent to the ``accuracy_score``. It differs in the multilabel classification problem. References ---------- .. [1] `Wikipedia entry for the Jaccard index <http://en.wikipedia.org/wiki/Jaccard_index>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import jaccard_similarity_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> jaccard_similarity_score(y_true, y_pred) 0.5 >>> jaccard_similarity_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\ np.ones((2, 2))) 0.75 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): with np.errstate(divide='ignore', invalid='ignore'): # oddly, we may get an "invalid" rather than a "divide" error here pred_or_true = count_nonzero(y_true + y_pred, axis=1) pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1) score = pred_and_true / pred_or_true # If there is no label, it results in a Nan instead, we set # the jaccard to 1: lim_{x->0} x/x = 1 # Note with py2.6 and np 1.3: we can't check safely for nan. score[pred_or_true == 0.0] = 1.0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def matthews_corrcoef(y_true, y_pred): """Compute the Matthews correlation coefficient (MCC) for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. Read more in the :ref:`User Guide <matthews_corrcoef>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. Returns ------- mcc : float The Matthews correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). 
Assessing the accuracy of prediction algorithms for classification: an overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_ .. [2] `Wikipedia entry for the Matthews Correlation Coefficient <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ Examples -------- >>> from sklearn.metrics import matthews_corrcoef >>> y_true = [+1, +1, +1, -1] >>> y_pred = [+1, -1, +1, +1] >>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS -0.33... """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type != "binary": raise ValueError("%s is not supported" % y_type) lb = LabelEncoder() lb.fit(np.hstack([y_true, y_pred])) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) with np.errstate(invalid='ignore'): mcc = np.corrcoef(y_true, y_pred)[0, 1] if np.isnan(mcc): return 0. else: return mcc def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None): """Zero-one classification loss. If normalize is ``True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). The best performance is 0. Read more in the :ref:`User Guide <zero_one_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of misclassifications. Otherwise, return the fraction of misclassifications. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, If ``normalize == True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). Notes ----- In multilabel classification, the zero_one_loss function corresponds to the subset zero-one loss: for each sample, the entire set of labels must be correctly predicted, otherwise the loss for that sample is equal to one. See also -------- accuracy_score, hamming_loss, jaccard_similarity_score Examples -------- >>> from sklearn.metrics import zero_one_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> zero_one_loss(y_true, y_pred) 0.25 >>> zero_one_loss(y_true, y_pred, normalize=False) 1 In the multilabel case with binary label indicators: >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ score = accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight) if normalize: return 1 - score else: if sample_weight is not None: n_samples = np.sum(sample_weight) else: n_samples = _num_samples(y_true) return n_samples - score def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F1 score, also known as balanced F-score or F-measure The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) In the multi-class and multi-label case, this is the weighted average of the F1 score of each class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. 
labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- f1_score : float or array of float, shape = [n_unique_labels] F1 score of the positive class in binary classification or weighted average of the F1 scores of each class for the multiclass task. References ---------- .. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import f1_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average=None) array([ 0.8, 0. , 0. ]) """ return fbeta_score(y_true, y_pred, 1, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F-beta score The F-beta score is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0. The `beta` parameter determines the weight of precision in the combined score. ``beta < 1`` lends more weight to precision, while ``beta > 1`` favors recall (``beta -> 0`` considers only precision, ``beta -> inf`` only recall). Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta: float Weight of precision in harmonic mean. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] F-beta score of the positive class in binary classification or weighted average of the F-beta score of each class for the multiclass task. References ---------- .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 327-328. .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import fbeta_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) ... # doctest: +ELLIPSIS 0.33... >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) ... # doctest: +ELLIPSIS array([ 0.71..., 0. , 0. 
]) """ _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label, average=average, warn_for=('f-score',), sample_weight=sample_weight) return f def _prf_divide(numerator, denominator, metric, modifier, average, warn_for): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero and raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning. """ result = numerator / denominator mask = denominator == 0.0 if not np.any(mask): return result # remove infs result[mask] = 0.0 # build appropriate warning # E.g. "Precision and F-score are ill-defined and being set to 0.0 in # labels with no predicted samples" axis0 = 'sample' axis1 = 'label' if average == 'samples': axis0, axis1 = axis1, axis0 if metric in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format(metric.title()) elif metric in warn_for: msg_start = '{0} is'.format(metric.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: return result msg = ('{0} ill-defined and being set to 0.0 {{0}} ' 'no {1} {2}s.'.format(msg_start, modifier, axis0)) if len(mask) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) return result def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None): """Compute precision, recall, F-measure and support for each class The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, 1.0 by default The strength of recall versus precision in the F-score. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. 
pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \ 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. warn_for : tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision: float (if average is not None) or array of float, shape =\ [n_unique_labels] recall: float (if average is not None) or array of float, , shape =\ [n_unique_labels] fbeta_score: float (if average is not None) or array of float, shape =\ [n_unique_labels] support: int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``y_true``. References ---------- .. [1] `Wikipedia entry for the Precision and recall <http://en.wikipedia.org/wiki/Precision_and_recall>`_ .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>` Examples -------- >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') ... # doctest: +ELLIPSIS (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE (array([ 0. , 0. , 0.66...]), array([ 0., 0., 1.]), array([ 0. , 0. 
, 0.8]), array([2, 2, 2])) """ average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) if beta <= 0: raise ValueError("beta should be >0 in the F-beta score") y_type, y_true, y_pred = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred) if average == 'binary' and (y_type != 'binary' or pos_label is None): warnings.warn('The default `weighted` averaging is deprecated, ' 'and from version 0.18, use of precision, recall or ' 'F-score with multiclass or multilabel data or ' 'pos_label=None will result in an exception. ' 'Please set an explicit value for `average`, one of ' '%s. In cross validation use, for instance, ' 'scoring="f1_weighted" instead of scoring="f1".' % str(average_options), DeprecationWarning, stacklevel=2) average = 'weighted' if y_type == 'binary' and pos_label is not None and average is not None: if average != 'binary': warnings.warn('From version 0.18, binary input will not be ' 'handled specially when using averaged ' 'precision/recall/F-score. ' 'Please use average=\'binary\' to report only the ' 'positive class performance.', DeprecationWarning) if labels is None or len(labels) <= 2: if pos_label not in present_labels: if len(present_labels) < 2: # Only negative labels return (0., 0., 0., 0) else: raise ValueError("pos_label=%r is not a valid label: %r" % (pos_label, present_labels)) labels = [pos_label] if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) ### Calculate tp_sum, pred_sum, true_sum ### if y_type.startswith('multilabel'): sum_axis = 1 if average == 'samples' else 0 # All labels are index integers for multilabel. # Select labels: if not np.all(labels == present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels). ' 'Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels). ' 'Got %d < 0' % np.min(labels)) y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) elif average == 'samples': raise ValueError("Sample-based precision, recall, fscore is " "not meaningful outside multilabel " "classification. 
See the accuracy_score instead.") else: le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = bincount(y_true, weights=sample_weight, minlength=len(labels)) # Retain only selected labels indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) ### Finally, we have all our sufficient statistics. Divide! ### beta2 = beta ** 2 with np.errstate(divide='ignore', invalid='ignore'): # Divide, and on zero-division, set scores to 0 and warn: # Oddly, we may get an "invalid" rather than a "divide" error # here. precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for) recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for) # Don't need to warn for F: either P or R warned, or tp == 0 where pos # and true are nonzero, in which case, F is well-defined and zero f_score = ((1 + beta2) * precision * recall / (beta2 * precision + recall)) f_score[tp_sum == 0] = 0.0 ## Average the results ## if average == 'weighted': weights = true_sum if weights.sum() == 0: return 0, 0, 0, None elif average == 'samples': weights = sample_weight else: weights = None if average is not None: assert average != 'binary' or len(precision) == 1 precision = np.average(precision, weights=weights) recall = np.average(recall, weights=weights) f_score = np.average(f_score, weights=weights) true_sum = None # return no support return precision, recall, f_score, true_sum def precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the precision The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. 
Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : float (if average is not None) or array of float, shape =\ [n_unique_labels] Precision of the positive class in binary classification or weighted average of the precision of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import precision_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> precision_score(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS array([ 0.66..., 0. , 0. ]) """ p, _, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('precision',), sample_weight=sample_weight) return p def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the recall The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. 
pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- recall : float (if average is not None) or array of float, shape =\ [n_unique_labels] Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average=None) array([ 1., 0., 0.]) """ _, r, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('recall',), sample_weight=sample_weight) return r def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2): """Build a text report showing the main classification metrics Read more in the :ref:`User Guide <classification_report>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). sample_weight : array-like of shape = [n_samples], optional Sample weights. digits : int Number of digits for formatting output floating point values Returns ------- report : string Text summary of the precision, recall, F1 score for each class. 
Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> avg / total 0.70 0.60 0.61 5 <BLANKLINE> """ if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) last_line_heading = 'avg / total' if target_names is None: width = len(last_line_heading) target_names = ['%s' % l for l in labels] else: width = max(len(cn) for cn in target_names) width = max(width, len(last_line_heading), digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first column: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight) for i, label in enumerate(labels): values = [target_names[i]] for v in (p[i], r[i], f1[i]): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s[i])] report += fmt % tuple(values) report += '\n' # compute averages values = [last_line_heading] for v in (np.average(p, weights=s), np.average(r, weights=s), np.average(f1, weights=s)): values += ["{0:0.{1}f}".format(v, digits)] values += ['{0}'.format(np.sum(s))] report += fmt % tuple(values) return report def hamming_loss(y_true, y_pred, classes=None): """Compute the average Hamming loss. The Hamming loss is the fraction of labels that are incorrectly predicted. Read more in the :ref:`User Guide <hamming_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. classes : array, shape = [n_labels], optional Integer array of labels. Returns ------- loss : float or int, Return the average Hamming loss between element of ``y_true`` and ``y_pred``. See Also -------- accuracy_score, jaccard_similarity_score, zero_one_loss Notes ----- In multiclass classification, the Hamming loss correspond to the Hamming distance between ``y_true`` and ``y_pred`` which is equivalent to the subset ``zero_one_loss`` function. In multilabel classification, the Hamming loss is different from the subset zero-one loss. The zero-one loss considers the entire set of labels for a given sample incorrect if it does entirely match the true set of labels. Hamming loss is more forgiving in that it penalizes the individual labels. The Hamming loss is upperbounded by the subset zero-one loss. When normalized over samples, the Hamming loss is always between 0 and 1. References ---------- .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification: An Overview. International Journal of Data Warehousing & Mining, 3(3), 1-13, July-September 2007. .. 
[2] `Wikipedia entry on the Hamming distance <http://en.wikipedia.org/wiki/Hamming_distance>`_ Examples -------- >>> from sklearn.metrics import hamming_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> hamming_loss(y_true, y_pred) 0.25 In the multilabel case with binary label indicators: >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) 0.75 """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if classes is None: classes = unique_labels(y_true, y_pred) else: classes = np.asarray(classes) if y_type.startswith('multilabel'): n_differences = count_nonzero(y_true - y_pred) return (n_differences / (y_true.shape[0] * len(classes))) elif y_type in ["binary", "multiclass"]: return sp_hamming(y_true, y_pred) else: raise ValueError("{0} is not supported".format(y_type)) def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Read more in the :ref:`User Guide <log_loss>`. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float Examples -------- >>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ lb = LabelBinarizer() T = lb.fit_transform(y_true) if T.shape[1] == 1: T = np.append(1 - T, T, axis=1) # Clipping Y = np.clip(y_pred, eps, 1 - eps) # This happens in cases when elements in y_pred have type "str". if not isinstance(Y, np.ndarray): raise ValueError("y_pred should be an array of floats.") # If y_pred is of single dimension, assume y_true to be binary # and then check. if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) # Check if dimensions are consistent. check_consistent_length(T, Y) T = check_array(T) Y = check_array(Y) if T.shape[1] != Y.shape[1]: raise ValueError("y_true and y_pred have different number of classes " "%d, %d" % (T.shape[1], Y.shape[1])) # Renormalize Y /= Y.sum(axis=1)[:, np.newaxis] loss = -(T * np.log(Y)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None): """Average hinge loss (non-regularized) In binary class case, assuming labels in y_true are encoded with +1 and -1, when a prediction mistake is made, ``margin = y_true * pred_decision`` is always negative (since the signs disagree), implying ``1 - margin`` is always greater than 1. 
The cumulated hinge loss is therefore an upper bound of the number of mistakes made by the classifier. In multiclass case, the function expects that either all the labels are included in y_true or an optional labels argument is provided which contains all the labels. The multilabel margin is calculated according to Crammer-Singer's method. As in the binary case, the cumulated hinge loss is an upper bound of the number of mistakes made by the classifier. Read more in the :ref:`User Guide <hinge_loss>`. Parameters ---------- y_true : array, shape = [n_samples] True target, consisting of integers of two values. The positive label must be greater than the negative label. pred_decision : array, shape = [n_samples] or [n_samples, n_classes] Predicted decisions, as output by decision_function (floats). labels : array, optional, default None Contains all the labels for the problem. Used in multiclass hinge loss. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] `Wikipedia entry on the Hinge loss <http://en.wikipedia.org/wiki/Hinge_loss>`_ .. [2] Koby Crammer, Yoram Singer. On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines. Journal of Machine Learning Research 2, (2001), 265-292 .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models by Robert C. Moore, John DeNero. <http://www.ttic.edu/sigml/symposium2011/papers/ Moore+DeNero_Regularization.pdf>`_ Examples -------- >>> from sklearn import svm >>> from sklearn.metrics import hinge_loss >>> X = [[0], [1]] >>> y = [-1, 1] >>> est = svm.LinearSVC(random_state=0) >>> est.fit(X, y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=0, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-2], [3], [0.5]]) >>> pred_decision # doctest: +ELLIPSIS array([-2.18..., 2.36..., 0.09...]) >>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS 0.30... In the multiclass case: >>> X = np.array([[0], [1], [2], [3]]) >>> Y = np.array([0, 1, 2, 3]) >>> labels = np.array([0, 1, 2, 3]) >>> est = svm.LinearSVC() >>> est.fit(X, Y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=None, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-1], [2], [3]]) >>> y_true = [0, 2, 3] >>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS 0.56... 
""" check_consistent_length(y_true, pred_decision, sample_weight) pred_decision = check_array(pred_decision, ensure_2d=False) y_true = column_or_1d(y_true) y_true_unique = np.unique(y_true) if y_true_unique.size > 2: if (labels is None and pred_decision.ndim > 1 and (np.size(y_true_unique) != pred_decision.shape[1])): raise ValueError("Please include all labels in y_true " "or pass labels as third argument") if labels is None: labels = y_true_unique le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) mask = np.ones_like(pred_decision, dtype=bool) mask[np.arange(y_true.shape[0]), y_true] = False margin = pred_decision[~mask] margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1) else: # Handles binary class case # this code assumes that positive and negative labels # are encoded as +1 and -1 respectively pred_decision = column_or_1d(pred_decision) pred_decision = np.ravel(pred_decision) lbin = LabelBinarizer(neg_label=-1) y_true = lbin.fit_transform(y_true)[:, 0] try: margin = y_true * pred_decision except TypeError: raise TypeError("pred_decision should be an array of floats.") losses = 1 - margin # The hinge_loss doesn't penalize good enough predictions. losses[losses <= 0] = 0 return np.average(losses, weights=sample_weight) def _check_binary_probabilistic_predictions(y_true, y_prob): """Check that y_true is binary and y_prob contains valid probabilities""" check_consistent_length(y_true, y_prob) labels = np.unique(y_true) if len(labels) != 2: raise ValueError("Only binary classification is supported. " "Provided labels %s." % labels) if y_prob.max() > 1: raise ValueError("y_prob contains values greater than 1.") if y_prob.min() < 0: raise ValueError("y_prob contains values less than 0.") return label_binarize(y_true, labels)[:, 0] def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None): """Compute the Brier score. The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Which label is considered to be the positive label is controlled via the parameter pos_label, which defaults to 1. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. sample_weight : array-like of shape = [n_samples], optional Sample weights. pos_label : int (default: None) Label of the positive class. 
If None, the maximum label is used as positive class Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from sklearn.metrics import brier_score_loss >>> y_true = np.array([0, 1, 1, 0]) >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"]) >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3]) >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true_categorical, y_prob, \ pos_label="ham") # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- http://en.wikipedia.org/wiki/Brier_score """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if pos_label is None: pos_label = y_true.max() y_true = np.array(y_true == pos_label, int) y_true = _check_binary_probabilistic_predictions(y_true, y_prob) return np.average((y_true - y_prob) ** 2, weights=sample_weight)
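Editor's sketch (an addition, not part of the scikit-learn module above): the 'weighted' averaging described repeatedly in these docstrings is simply the support-weighted mean of the per-class scores returned with ``average=None``. The snippet below cross-checks that relationship on made-up labels; it assumes a scikit-learn installation whose ``precision_recall_fscore_support`` matches the signature shown above.

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# Toy multiclass labels (made up for illustration only).
y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 2, 1, 0, 0, 1]

# Per-class precision/recall/F-score plus the support of each class.
p, r, f, support = precision_recall_fscore_support(y_true, y_pred,
                                                   average=None)

# 'weighted' averaging should equal the support-weighted mean of the
# per-class scores returned above.
pw, rw, fw, _ = precision_recall_fscore_support(y_true, y_pred,
                                                average='weighted')
assert np.isclose(pw, np.average(p, weights=support))
assert np.isclose(fw, np.average(f, weights=support))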
bsd-3-clause
jblackburne/scikit-learn
sklearn/metrics/ranking.py
7
27659
"""Metrics to assess performance on classification task given scores Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import csr_matrix from ..utils import assert_all_finite from ..utils import check_consistent_length from ..utils import column_or_1d, check_array from ..utils.multiclass import type_of_target from ..utils.extmath import stable_cumsum from ..utils.fixes import bincount from ..utils.fixes import array_equal from ..utils.stats import rankdata from ..utils.sparsefuncs import count_nonzero from ..exceptions import UndefinedMetricWarning from .base import _average_binary_score def auc(x, y, reorder=False): """Compute Area Under the Curve (AUC) using the trapezoidal rule This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. Parameters ---------- x : array, shape = [n] x coordinates. y : array, shape = [n] y coordinates. reorder : boolean, optional (default=False) If True, assume that the curve is ascending in the case of ties, as for an ROC curve. If the curve is non-ascending, the result will be wrong. Returns ------- auc : float Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 See also -------- roc_auc_score : Computes the area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError('At least 2 points are needed to compute' ' area under curve, but x.shape = %s' % x.shape) direction = 1 if reorder: # reorder the data points according to the x axis and using y to # break ties order = np.lexsort((y, x)) x, y = x[order], y[order] else: dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("Reordering is not turned on, and " "the x array is not increasing: %s" % x) area = direction * np.trapz(y, x) if isinstance(area, np.memmap): # Reductions such as .sum used internally in np.trapz do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) return area def average_precision_score(y_true, y_score, average="macro", sample_weight=None): """Compute average precision (AP) from prediction scores This score corresponds to the area under the precision-recall curve. Note: this implementation is restricted to the binary classification task or multilabel classification task. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. 
y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- average_precision : float References ---------- .. [1] `Wikipedia entry for the Average precision <https://en.wikipedia.org/wiki/Average_precision>`_ See also -------- roc_auc_score : Area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS 0.79... """ def _binary_average_precision(y_true, y_score, sample_weight=None): precision, recall, thresholds = precision_recall_curve( y_true, y_score, sample_weight=sample_weight) return auc(recall, precision) return _average_binary_score(_binary_average_precision, y_true, y_score, average, sample_weight=sample_weight) def roc_auc_score(y_true, y_score, average="macro", sample_weight=None): """Compute Area Under the Curve (AUC) from prediction scores Note: this implementation is restricted to the binary classification task or multilabel classification task in label indicator format. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- auc : float References ---------- .. 
[1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ See also -------- average_precision_score : Area under the precision-recall curve roc_curve : Compute Receiver operating characteristic (ROC) Examples -------- >>> import numpy as np >>> from sklearn.metrics import roc_auc_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> roc_auc_score(y_true, y_scores) 0.75 """ def _binary_roc_auc_score(y_true, y_score, sample_weight=None): if len(np.unique(y_true)) != 2: raise ValueError("Only one class present in y_true. ROC AUC score " "is not defined in that case.") fpr, tpr, tresholds = roc_curve(y_true, y_score, sample_weight=sample_weight) return auc(fpr, tpr, reorder=True) return _average_binary_score( _binary_roc_auc_score, y_true, y_score, average, sample_weight=sample_weight) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification y_score : array, shape = [n_samples] Estimated probabilities or decision function pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fps : array, shape = [n_thresholds] A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : array, shape = [n_thresholds <= len(np.unique(y_score))] An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : array, shape = [n_thresholds] Decreasing score values. """ check_consistent_length(y_true, y_score) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) # ensure binary classification if pos_label is not specified classes = np.unique(y_true) if (pos_label is None and not (array_equal(classes, [0, 1]) or array_equal(classes, [-1, 1]) or array_equal(classes, [0]) or array_equal(classes, [-1]) or array_equal(classes, [1]))): raise ValueError("Data is not binary and pos_label is not specified") elif pos_label is None: pos_label = 1. # make y_true a boolean vector y_true = (y_true == pos_label) # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] if sample_weight is not None: weight = sample_weight[desc_score_indices] else: weight = 1. # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. 
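    # Editor's note (illustrative, assumed values, not from the original
    # source): if y_score sorted descending is [0.8, 0.4, 0.4, 0.1], then
    # np.diff(y_score) is [-0.4, 0.0, -0.3], so np.where(...) keeps indices
    # [0, 2]; appending y_true.size - 1 = 3 gives threshold_idxs = [0, 2, 3],
    # i.e. one cut-off per distinct score value (0.8, 0.4, 0.1).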
distinct_value_indices = np.where(np.diff(y_score))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = stable_cumsum(y_true * weight)[threshold_idxs] if sample_weight is not None: fps = stable_cumsum(weight)[threshold_idxs] - tps else: fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs] def precision_recall_curve(y_true, probas_pred, pos_label=None, sample_weight=None): """Compute precision-recall pairs for different probability thresholds Note: this implementation is restricted to the binary classification task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The last precision and recall values are 1. and 0. respectively and do not have a corresponding threshold. This ensures that the graph starts on the x axis. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification in range {-1, 1} or {0, 1}. probas_pred : array, shape = [n_samples] Estimated probabilities or decision function. pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : array, shape = [n_thresholds + 1] Precision values such that element i is the precision of predictions with score >= thresholds[i] and the last element is 1. recall : array, shape = [n_thresholds + 1] Decreasing recall values such that element i is the recall of predictions with score >= thresholds[i] and the last element is 0. thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))] Increasing thresholds on the decision function used to compute precision and recall. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> precision, recall, thresholds = precision_recall_curve( ... y_true, y_scores) >>> precision # doctest: +ELLIPSIS array([ 0.66..., 0.5 , 1. , 1. ]) >>> recall array([ 1. , 0.5, 0.5, 0. ]) >>> thresholds array([ 0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight) precision = tps / (tps + fps) recall = tps / tps[-1] # stop when full recall attained # and reverse the outputs so recall is decreasing last_ind = tps.searchsorted(tps[-1]) sl = slice(last_ind, None, -1) return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl] def roc_curve(y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=True): """Compute Receiver operating characteristic (ROC) Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True binary labels in range {0, 1} or {-1, 1}. If labels are not binary, pos_label should be explicitly given. 
y_score : array, shape = [n_samples] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int Label considered as positive and others are considered negative. sample_weight : array-like of shape = [n_samples], optional Sample weights. drop_intermediate : boolean, optional (default=True) Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. .. versionadded:: 0.17 parameter *drop_intermediate*. Returns ------- fpr : array, shape = [>2] Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]. tpr : array, shape = [>2] Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i]. thresholds : array, shape = [n_thresholds] Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `max(y_score) + 1`. See also -------- roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([ 0. , 0.5, 0.5, 1. ]) >>> tpr array([ 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight) # Attempt to drop thresholds corresponding to points in between and # collinear with other points. These are always suboptimal and do not # appear on a plotted ROC curve (and thus do not affect the AUC). # Here np.diff(_, 2) is used as a "second derivative" to tell if there # is a corner at the point. Both fps and tps must be tested to handle # thresholds with multiple data points (which are combined in # _binary_clf_curve). This keeps all cases where the point should be kept, # but does not drop more complicated cases like fps = [1, 3, 7], # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. 
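    # Editor's note (illustrative, assumed values, not from the original
    # source): with fps = [0, 1, 2, 3] and tps = [0, 1, 1, 1],
    # np.diff(fps, 2) == [0, 0] and np.diff(tps, 2) == [-1, 0]. The interior
    # point at index 1 has a nonzero second difference (a corner) and is
    # kept, while index 2 lies on the straight segment between its
    # neighbours and is dropped; the endpoints are always retained.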
if drop_intermediate and len(fps) > 2: optimal_idxs = np.where(np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True])[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] if tps.size == 0 or fps[0] != 0: # Add an extra threshold position if necessary tps = np.r_[0, tps] fps = np.r_[0, fps] thresholds = np.r_[thresholds[0] + 1, thresholds] if fps[-1] <= 0: warnings.warn("No negative samples in y_true, " "false positive value should be meaningless", UndefinedMetricWarning) fpr = np.repeat(np.nan, fps.shape) else: fpr = fps / fps[-1] if tps[-1] <= 0: warnings.warn("No positive samples in y_true, " "true positive value should be meaningless", UndefinedMetricWarning) tpr = np.repeat(np.nan, tps.shape) else: tpr = tps / tps[-1] return fpr, tpr, thresholds def label_ranking_average_precision_score(y_true, y_score): """Compute ranking-based average precision Label ranking average precision (LRAP) is the average over each ground truth label assigned to each sample, of the ratio of true vs. total labels with lower score. This metric is used in multilabel ranking problem, where the goal is to give better rank to the labels associated to each sample. The obtained score is always strictly greater than 0 and the best value is 1. Read more in the :ref:`User Guide <label_ranking_average_precision>`. Parameters ---------- y_true : array or sparse matrix, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). Returns ------- score : float Examples -------- >>> import numpy as np >>> from sklearn.metrics import label_ranking_average_precision_score >>> y_true = np.array([[1, 0, 0], [0, 0, 1]]) >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]]) >>> label_ranking_average_precision_score(y_true, y_score) \ # doctest: +ELLIPSIS 0.416... """ check_consistent_length(y_true, y_score) y_true = check_array(y_true, ensure_2d=False) y_score = check_array(y_score, ensure_2d=False) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") # Handle badly formated array and the degenerate case with one label y_type = type_of_target(y_true) if (y_type != "multilabel-indicator" and not (y_type == "binary" and y_true.ndim == 2)): raise ValueError("{0} format is not supported".format(y_type)) y_true = csr_matrix(y_true) y_score = -y_score n_samples, n_labels = y_true.shape out = 0. for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): relevant = y_true.indices[start:stop] if (relevant.size == 0 or relevant.size == n_labels): # If all labels are relevant or unrelevant, the score is also # equal to 1. The label ranking has no meaning. out += 1. continue scores_i = y_score[i] rank = rankdata(scores_i, 'max')[relevant] L = rankdata(scores_i[relevant], 'max') out += (L / rank).mean() return out / n_samples def coverage_error(y_true, y_score, sample_weight=None): """Coverage error measure Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in ``y_true`` per sample. Ties in ``y_scores`` are broken by giving maximal rank that would have been assigned to all tied values. Read more in the :ref:`User Guide <coverage_error>`. 
Parameters ---------- y_true : array, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- coverage_error : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. """ y_true = check_array(y_true, ensure_2d=False) y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type != "multilabel-indicator": raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight) def label_ranking_loss(y_true, y_score, sample_weight=None): """Compute Ranking loss measure Compute the average number of label pairs that are incorrectly ordered given y_score weighted by the size of the label set and the number of labels not in the label set. This is similar to the error set size, but weighted by the number of relevant and irrelevant labels. The best performance is achieved with a ranking loss of zero. Read more in the :ref:`User Guide <label_ranking_loss>`. .. versionadded:: 0.17 A function *label_ranking_loss* Parameters ---------- y_true : array or sparse matrix, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. 
""" y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr') y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type not in ("multilabel-indicator",): raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") n_samples, n_labels = y_true.shape y_true = csr_matrix(y_true) loss = np.zeros(n_samples) for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): # Sort and bin the label scores unique_scores, unique_inverse = np.unique(y_score[i], return_inverse=True) true_at_reversed_rank = bincount( unique_inverse[y_true.indices[start:stop]], minlength=len(unique_scores)) all_at_reversed_rank = bincount(unique_inverse, minlength=len(unique_scores)) false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank # if the scores are ordered, it's possible to count the number of # incorrectly ordered paires in linear time by cumulatively counting # how many false labels of a given score have a score higher than the # accumulated true labels with lower score. loss[i] = np.dot(true_at_reversed_rank.cumsum(), false_at_reversed_rank) n_positives = count_nonzero(y_true, axis=1) with np.errstate(divide="ignore", invalid="ignore"): loss /= ((n_labels - n_positives) * n_positives) # When there is no positive or no negative labels, those values should # be consider as correct, i.e. the ranking doesn't matter. loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0. return np.average(loss, weights=sample_weight)
bsd-3-clause
datapythonista/pandas
asv_bench/benchmarks/reindex.py
3
4731
import numpy as np from pandas import ( DataFrame, Index, MultiIndex, Series, date_range, period_range, ) from .pandas_vb_common import tm class Reindex: def setup(self): rng = date_range(start="1/1/1970", periods=10000, freq="1min") self.df = DataFrame(np.random.rand(10000, 10), index=rng, columns=range(10)) self.df["foo"] = "bar" self.rng_subset = Index(rng[::2]) self.df2 = DataFrame( index=range(10000), data=np.random.rand(10000, 30), columns=range(30) ) N = 5000 K = 200 level1 = tm.makeStringIndex(N).values.repeat(K) level2 = np.tile(tm.makeStringIndex(K).values, N) index = MultiIndex.from_arrays([level1, level2]) self.s = Series(np.random.randn(N * K), index=index) self.s_subset = self.s[::2] def time_reindex_dates(self): self.df.reindex(self.rng_subset) def time_reindex_columns(self): self.df2.reindex(columns=self.df.columns[1:5]) def time_reindex_multiindex(self): self.s.reindex(self.s_subset.index) class ReindexMethod: params = [["pad", "backfill"], [date_range, period_range]] param_names = ["method", "constructor"] def setup(self, method, constructor): N = 100000 self.idx = constructor("1/1/2000", periods=N, freq="1min") self.ts = Series(np.random.randn(N), index=self.idx)[::2] def time_reindex_method(self, method, constructor): self.ts.reindex(self.idx, method=method) class Fillna: params = ["pad", "backfill"] param_names = ["method"] def setup(self, method): N = 100000 self.idx = date_range("1/1/2000", periods=N, freq="1min") ts = Series(np.random.randn(N), index=self.idx)[::2] self.ts_reindexed = ts.reindex(self.idx) self.ts_float32 = self.ts_reindexed.astype("float32") def time_reindexed(self, method): self.ts_reindexed.fillna(method=method) def time_float_32(self, method): self.ts_float32.fillna(method=method) class LevelAlign: def setup(self): self.index = MultiIndex( levels=[np.arange(10), np.arange(100), np.arange(100)], codes=[ np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10), ], ) self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1]) def time_align_level(self): self.df.align(self.df_level, level=1, copy=False) def time_reindex_level(self): self.df_level.reindex(self.index, level=1) class DropDuplicates: params = [True, False] param_names = ["inplace"] def setup(self, inplace): N = 10000 K = 10 key1 = tm.makeStringIndex(N).values.repeat(K) key2 = tm.makeStringIndex(N).values.repeat(K) self.df = DataFrame( {"key1": key1, "key2": key2, "value": np.random.randn(N * K)} ) self.df_nan = self.df.copy() self.df_nan.iloc[:10000, :] = np.nan self.s = Series(np.random.randint(0, 1000, size=10000)) self.s_str = Series(np.tile(tm.makeStringIndex(1000).values, 10)) N = 1000000 K = 10000 key1 = np.random.randint(0, K, size=N) self.df_int = DataFrame({"key1": key1}) self.df_bool = DataFrame(np.random.randint(0, 2, size=(K, 10), dtype=bool)) def time_frame_drop_dups(self, inplace): self.df.drop_duplicates(["key1", "key2"], inplace=inplace) def time_frame_drop_dups_na(self, inplace): self.df_nan.drop_duplicates(["key1", "key2"], inplace=inplace) def time_series_drop_dups_int(self, inplace): self.s.drop_duplicates(inplace=inplace) def time_series_drop_dups_string(self, inplace): self.s_str.drop_duplicates(inplace=inplace) def time_frame_drop_dups_int(self, inplace): self.df_int.drop_duplicates(inplace=inplace) def time_frame_drop_dups_bool(self, inplace): self.df_bool.drop_duplicates(inplace=inplace) class Align: # blog "pandas 
escaped the zoo" def setup(self): n = 50000 indices = tm.makeStringIndex(n) subsample_size = 40000 self.x = Series(np.random.randn(n), indices) self.y = Series( np.random.randn(subsample_size), index=np.random.choice(indices, subsample_size, replace=False), ) def time_align_series_irregular_string(self): self.x + self.y from .pandas_vb_common import setup # noqa: F401 isort:skip
bsd-3-clause
krez13/scikit-learn
sklearn/datasets/mldata.py
309
7838
"""Automatically download MLdata datasets.""" # Copyright (c) 2011 Pietro Berkes # License: BSD 3 clause import os from os.path import join, exists import re import numbers try: # Python 2 from urllib2 import HTTPError from urllib2 import quote from urllib2 import urlopen except ImportError: # Python 3+ from urllib.error import HTTPError from urllib.parse import quote from urllib.request import urlopen import numpy as np import scipy as sp from scipy import io from shutil import copyfileobj from .base import get_data_home, Bunch MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s" def mldata_filename(dataname): """Convert a raw name for a data set in a mldata.org filename.""" dataname = dataname.lower().replace(' ', '-') return re.sub(r'[().]', '', dataname) def fetch_mldata(dataname, target_name='label', data_name='data', transpose_data=True, data_home=None): """Fetch an mldata.org data set If the file does not exist yet, it is downloaded from mldata.org . mldata.org does not have an enforced convention for storing data or naming the columns in a data set. The default behavior of this function works well with the most common cases: 1) data values are stored in the column 'data', and target values in the column 'label' 2) alternatively, the first column stores target values, and the second data values 3) the data array is stored as `n_features x n_samples` , and thus needs to be transposed to match the `sklearn` standard Keyword arguments allow to adapt these defaults to specific data sets (see parameters `target_name`, `data_name`, `transpose_data`, and the examples below). mldata.org data sets may have multiple columns, which are stored in the Bunch object with their original name. Parameters ---------- dataname: Name of the data set on mldata.org, e.g.: "leukemia", "Whistler Daily Snowfall", etc. The raw name is automatically converted to a mldata.org URL . target_name: optional, default: 'label' Name or index of the column containing the target values. data_name: optional, default: 'data' Name or index of the column containing the data. transpose_data: optional, default: True If True, transpose the downloaded data array. data_home: optional, default: None Specify another download and cache folder for the data sets. By default all scikit learn data is stored in '~/scikit_learn_data' subfolders. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'data', the data to learn, 'target', the classification labels, 'DESCR', the full description of the dataset, and 'COL_NAMES', the original names of the dataset columns. Examples -------- Load the 'iris' dataset from mldata.org: >>> from sklearn.datasets.mldata import fetch_mldata >>> import tempfile >>> test_data_home = tempfile.mkdtemp() >>> iris = fetch_mldata('iris', data_home=test_data_home) >>> iris.target.shape (150,) >>> iris.data.shape (150, 4) Load the 'leukemia' dataset from mldata.org, which needs to be transposed to respects the sklearn axes convention: >>> leuk = fetch_mldata('leukemia', transpose_data=True, ... data_home=test_data_home) >>> leuk.data.shape (72, 7129) Load an alternative 'iris' dataset, which has different names for the columns: >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1, ... data_name=0, data_home=test_data_home) >>> iris3 = fetch_mldata('datasets-UCI iris', ... target_name='class', data_name='double0', ... 
data_home=test_data_home) >>> import shutil >>> shutil.rmtree(test_data_home) """ # normalize dataset name dataname = mldata_filename(dataname) # check if this data set has been already downloaded data_home = get_data_home(data_home=data_home) data_home = join(data_home, 'mldata') if not exists(data_home): os.makedirs(data_home) matlab_name = dataname + '.mat' filename = join(data_home, matlab_name) # if the file does not exist, download it if not exists(filename): urlname = MLDATA_BASE_URL % quote(dataname) try: mldata_url = urlopen(urlname) except HTTPError as e: if e.code == 404: e.msg = "Dataset '%s' not found on mldata.org." % dataname raise # store Matlab file try: with open(filename, 'w+b') as matlab_file: copyfileobj(mldata_url, matlab_file) except: os.remove(filename) raise mldata_url.close() # load dataset matlab file with open(filename, 'rb') as matlab_file: matlab_dict = io.loadmat(matlab_file, struct_as_record=True) # -- extract data from matlab_dict # flatten column names col_names = [str(descr[0]) for descr in matlab_dict['mldata_descr_ordering'][0]] # if target or data names are indices, transform then into names if isinstance(target_name, numbers.Integral): target_name = col_names[target_name] if isinstance(data_name, numbers.Integral): data_name = col_names[data_name] # rules for making sense of the mldata.org data format # (earlier ones have priority): # 1) there is only one array => it is "data" # 2) there are multiple arrays # a) copy all columns in the bunch, using their column name # b) if there is a column called `target_name`, set "target" to it, # otherwise set "target" to first column # c) if there is a column called `data_name`, set "data" to it, # otherwise set "data" to second column dataset = {'DESCR': 'mldata.org dataset: %s' % dataname, 'COL_NAMES': col_names} # 1) there is only one array => it is considered data if len(col_names) == 1: data_name = col_names[0] dataset['data'] = matlab_dict[data_name] # 2) there are multiple arrays else: for name in col_names: dataset[name] = matlab_dict[name] if target_name in col_names: del dataset[target_name] dataset['target'] = matlab_dict[target_name] else: del dataset[col_names[0]] dataset['target'] = matlab_dict[col_names[0]] if data_name in col_names: del dataset[data_name] dataset['data'] = matlab_dict[data_name] else: del dataset[col_names[1]] dataset['data'] = matlab_dict[col_names[1]] # set axes to sklearn conventions if transpose_data: dataset['data'] = dataset['data'].T if 'target' in dataset: if not sp.sparse.issparse(dataset['target']): dataset['target'] = dataset['target'].squeeze() return Bunch(**dataset) # The following is used by nosetests to setup the docstring tests fixture def setup_module(module): # setup mock urllib2 module to avoid downloading from mldata.org from sklearn.utils.testing import install_mldata_mock install_mldata_mock({ 'iris': { 'data': np.empty((150, 4)), 'label': np.empty(150), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, 'leukemia': { 'data': np.empty((72, 7129)), }, }) def teardown_module(module): from sklearn.utils.testing import uninstall_mldata_mock uninstall_mldata_mock()
bsd-3-clause
tseaver/google-cloud-python
firestore/docs/conf.py
2
11885
# -*- coding: utf-8 -*- # # google-cloud-firestore documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) __version__ = "0.1.0" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.6.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Allow markdown includes (so releases.md can include CHANGLEOG.md) # http://www.sphinx-doc.org/en/master/markdown.html source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"google-cloud-firestore" copyright = u"2017, Google" author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for Python", "github_user": "googleapis", "github_repo": "google-cloud-python", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-firestore-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 "ref.python" ] # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "google-cloud-firestore.tex", u"google-cloud-firestore Documentation", author, "manual", ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( master_doc, "google-cloud-firestore", u"google-cloud-firestore Documentation", [author], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "google-cloud-firestore", u"google-cloud-firestore Documentation", author, "google-cloud-firestore", "GAPIC library for the {metadata.shortName}", "APIs", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "gax": ("https://gax-python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("https://requests.kennethreitz.org/en/stable/", None), "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
apache-2.0
jblackburne/scikit-learn
examples/plot_multioutput_face_completion.py
330
3019
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.utils.validation import check_random_state from sklearn.ensemble import ExtraTreesRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression from sklearn.linear_model import RidgeCV # Load the faces datasets data = fetch_olivetti_faces() targets = data.target data = data.images.reshape((len(data.images), -1)) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces, )) test = test[face_ids, :] n_pixels = data.shape[1] X_train = train[:, :np.ceil(0.5 * n_pixels)] # Upper half of the faces y_train = train[:, np.floor(0.5 * n_pixels):] # Lower half of the faces X_test = test[:, :np.ceil(0.5 * n_pixels)] y_test = test[:, np.floor(0.5 * n_pixels):] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32, random_state=0), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2. * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest") plt.show()
bsd-3-clause
jcrudy/glm-sklearn
glmsklearn/glm.py
1
9972
import statsmodels.api import statsmodels.genmod.families.family import numpy as np from sklearn.metrics import r2_score class GLM(object): ''' A scikit-learn style wrapper for statsmodels.api.GLM. The purpose of this class is to make generalized linear models compatible with scikit-learn's Pipeline objects. family : instance of subclass of statsmodels.genmod.families.family.Family The family argument determines the distribution family to use for GLM fitting. xlabels : iterable of strings, optional (empty by default) The xlabels argument can be used to assign names to data columns. This argument is not generally needed, as names can be captured automatically from most standard data structures. If included, must have length n, where n is the number of features. Note that column order is used to compute term values and make predictions, not column names. ''' def __init__(self, family, add_constant=True): self.family = family self.add_constant = add_constant def _scrub_x(self, X, offset, exposure, **kwargs): ''' Sanitize input predictors and extract column names if appropriate. ''' no_labels = False if 'xlabels' not in kwargs and 'xlabels' not in self.__dict__: #Try to get xlabels from input data (for example, if X is a pandas DataFrame) try: self.xlabels = list(X.columns) except AttributeError: try: self.xlabels = list(X.design_info.column_names) except AttributeError: try: self.xlabels = list(X.dtype.names) except TypeError: no_labels = True elif 'xlabels' not in self.__dict__: self.xlabels = kwargs['xlabels'] #Convert to internally used data type X = np.asarray(X,dtype=np.float64) m,n = X.shape if offset is not None: offset = np.asarray(offset,dtype=np.float64) offset = offset.reshape(offset.shape[0]) if exposure is not None: exposure = np.asarray(exposure,dtype=np.float64) exposure = exposure.reshape(exposure.shape[0]) #Make up labels if none were found if no_labels: self.xlabels = ['x'+str(i) for i in range(n)] return X, offset, exposure def _scrub(self, X, y, offset, exposure, **kwargs): ''' Sanitize input data. ''' #Check whether X is the output of patsy.dmatrices if y is None and type(X) is tuple: y, X = X #Handle X separately X, offset, exposure = self._scrub_x(X, offset, exposure, **kwargs) #Convert y to internally used data type y = np.asarray(y,dtype=np.float64) y = y.reshape(y.shape[0]) #Make sure dimensions match if y.shape[0] != X.shape[0]: raise ValueError('X and y do not have compatible dimensions.') return X, y, offset, exposure def fit(self, X, y = None, offset = None, exposure = None, xlabels = None): ''' Fit a GLM model to the input data X and y. Parameters ---------- X : array-like, shape = [m, n] where m is the number of samples and n is the number of features The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices. y : array-like, optional (default=None), shape = [m] where m is the number of samples The training response. The y parameter can be a numpy array, a pandas DataFrame with one column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a call to patsy.dmatrices (in which case, X contains the response). xlabels : iterable of strings, optional (default=None) Convenient way to set the xlabels parameter while calling fit. Ignored if None (default). See the GLM class for an explanation of the xlabels parameter. 
''' #Format and label the data if xlabels is not None: self.set_params(xlabels=xlabels) X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__) #Add a constant column if self.add_constant: X = statsmodels.api.add_constant(X, prepend=True) #Do the actual work model = statsmodels.api.GLM(y, X, self.family, offset=offset, exposure=exposure) result = model.fit() self.coef_ = result.params return self def predict(self, X, offset = None, exposure = None): ''' Predict the response based on the input data X. Parameters ---------- X : array-like, shape = [m, n] where m is the number of samples and n is the number of features The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a patsy DesignMatrix. ''' #Format the data X, offset, exposure = self._scrub_x(X, offset, exposure) #Linear transformation eta = self.transform(X, offset, exposure) #Nonlinear transformation y_hat = self.family.fitted(eta) return y_hat def transform(self, X, offset = None, exposure = None): ''' Perform a linear transformation of X. Parameters ---------- X : array-like, shape = [m, n] where m is the number of samples and n is the number of features The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a patsy DesignMatrix. ''' #Format the data X, offset, exposure = self._scrub_x(X, offset, exposure) #Add a constant column if self.add_constant: X = statsmodels.api.add_constant(X, prepend=True) #Compute linear combination eta = np.dot(X,self.coef_) #Apply offset and exposure if offset is not None: eta += offset if exposure is not None: eta += np.log(exposure) return eta def score(self, X, y = None, offset = None, exposure = None, xlabels = None): X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__) y_pred = self.predict(X, offset=offset, exposure=exposure) return r2_score(y, y_pred) def get_params(self, deep = False): return {} def __repr__(self): return self.__class__.__name__ + '()' def __str__(self): return self.__class__.__name__ + '()' class GLMFamily(GLM): family = NotImplemented def __init__(self, add_constant=True): super(GLMFamily,self).__init__(family=self.__class__.family(), add_constant=add_constant) class BinomialRegressor(GLMFamily): family = statsmodels.genmod.families.family.Binomial class GammaRegressor(GLMFamily): family = statsmodels.genmod.families.family.Gamma class GaussianRegressor(GLMFamily): family = statsmodels.genmod.families.family.Gaussian class InverseGaussianRegressor(GLMFamily): family = statsmodels.genmod.families.family.InverseGaussian class NegativeBinomialRegressor(GLMFamily): family = statsmodels.genmod.families.family.NegativeBinomial class PoissonRegressor(GLMFamily): family = statsmodels.genmod.families.family.Poisson # def fit(self, X, y = None, exposure = None, xlabels = None): # ''' # Fit a GLM model to the input data X and y. # # # Parameters # ---------- # X : array-like, shape = [m, n] where m is the number of samples and n is the number of features # The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy # DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices. # # # y : array-like, optional (default=None), shape = [m] where m is the number of samples # The training response. The y parameter can be a numpy array, a pandas DataFrame with one # column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a # call to patsy.dmatrices (in which case, X contains the response). 
# # # xlabels : iterable of strings, optional (default=None) # Convenient way to set the xlabels parameter while calling fit. Ignored if None (default). # See the GLM class for an explanation of the xlabels parameter. # # ''' # #Format and label the data # if xlabels is not None: # self.set_params(xlabels=xlabels) # X, y = self._scrub(X,y,**self.__dict__) # if exposure is not None: # exposure = np.asarray(exposure) # exposure = exposure.reshape(exposure.shape[0]) # if exposure.shape != y.shape: # raise ValueError('Shape of exposure does not match shape of y.') # # #Add a constant column # if self.add_constant: # X = statsmodels.api.add_constant(X, prepend=True) # # #Do the actual work # if exposure is None: # model = statsmodels.api.GLM(y, X, self.family) # else: # model = statsmodels.api.GLM(y, X, self.family, exposure=exposure) # result = model.fit() # self.coef_ = result.params # # return self
bsd-3-clause
nesterione/scikit-learn
examples/svm/plot_svm_margin.py
318
2328
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= SVM Margins Example ========================================================= The plots below illustrate the effect the parameter `C` has on the separation line. A large value of `C` basically tells our model that we do not have that much faith in our data's distribution, and will only consider points close to line of separation. A small value of `C` includes more/all the observations, allowing the margins to be calculated using all the data in the area. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import svm # we create 40 separable points np.random.seed(0) X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]] Y = [0] * 20 + [1] * 20 # figure number fignum = 1 # fit the model for name, penalty in (('unreg', 1), ('reg', 0.05)): clf = svm.SVC(kernel='linear', C=penalty) clf.fit(X, Y) # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - (clf.intercept_[0]) / w[1] # plot the parallels to the separating hyperplane that pass through the # support vectors margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2)) yy_down = yy + a * margin yy_up = yy - a * margin # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.plot(xx, yy, 'k-') plt.plot(xx, yy_down, 'k--') plt.plot(xx, yy_up, 'k--') plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10) plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired) plt.axis('tight') x_min = -4.8 x_max = 4.2 y_min = -6 y_max = 6 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.predict(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
bsd-3-clause
Querschlag/tickmate
analysis/tmkit/database.py
3
1997
import sqlite3 import numpy as np import pandas as pd import datetime import sys def cached_property(f): """returns a cached property that is calculated by function f""" def get(self): try: return self._property_cache[f] except AttributeError: self._property_cache = {} x = self._property_cache[f] = f(self) return x except KeyError: x = self._property_cache[f] = f(self) return x return property(get) class TickmateDatabase(object): def __init__(self, filename, hide_names=False): self.conn = sqlite3.connect(filename) self.hide_names = hide_names @cached_property def tracks(self): _tracks = pd.read_sql("select _id as id, name " + "from tracks order by \"order\"", self.conn, index_col='id') if self.hide_names: _tracks.name = pd.Series(["Track {0}".format(n) for n in range(1, _tracks.shape[0] + 1)], index=_tracks.index) return _tracks @cached_property def ticks(self): _ticks = pd.read_sql("select *, _track_id as track_id " + "from ticks order by \"year\", \"month\", \"day\"", self.conn, index_col='_id') _ticks['date'] = pd.to_datetime( _ticks.year.astype(str) + "/" + (_ticks.month + 1).astype(str) + "/" + _ticks.day.astype(str), coerce=True) _ticks['count'] = 1 _ticks = _ticks.groupby(('track_id', 'date'))['count'].count() return _ticks @cached_property def timeseries(self): _timeseries = pd.DataFrame() for track_id, track in self.tracks.iterrows(): _timeseries[track_id] = self.ticks[track_id].resample('D').fillna(0) return _timeseries @cached_property def date_range(self): return pd.date_range(self.ticks.reset_index().iloc[0].date, self.ticks.reset_index().iloc[-1].date)
gpl-3.0
glouppe/scikit-learn
sklearn/model_selection/tests/test_validation.py
20
27961
"""Test the validation module""" from __future__ import division import sys import warnings import numpy as np from scipy.sparse import coo_matrix, csr_matrix from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_val_predict from sklearn.model_selection import permutation_test_score from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import LeaveOneOut from sklearn.model_selection import LeaveOneLabelOut from sklearn.model_selection import LeavePLabelOut from sklearn.model_selection import LabelKFold from sklearn.model_selection import LabelShuffleSplit from sklearn.model_selection import learning_curve from sklearn.model_selection import validation_curve from sklearn.model_selection._validation import _check_is_permutation from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_iris from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.linear_model import Ridge from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.base import BaseEstimator from sklearn.multiclass import OneVsRestClassifier from sklearn.datasets import make_classification from sklearn.datasets import make_multilabel_classification from test_split import MockClassifier class MockImprovingEstimator(BaseEstimator): """Dummy classifier to test the learning curve""" def __init__(self, n_max_train_sizes): self.n_max_train_sizes = n_max_train_sizes self.train_sizes = 0 self.X_subset = None def fit(self, X_subset, y_subset=None): self.X_subset = X_subset self.train_sizes = X_subset.shape[0] return self def predict(self, X): raise NotImplementedError def score(self, X=None, Y=None): # training score becomes worse (2 -> 1), test error better (0 -> 1) if self._is_training_data(X): return 2. 
- float(self.train_sizes) / self.n_max_train_sizes else: return float(self.train_sizes) / self.n_max_train_sizes def _is_training_data(self, X): return X is self.X_subset class MockIncrementalImprovingEstimator(MockImprovingEstimator): """Dummy classifier that provides partial_fit""" def __init__(self, n_max_train_sizes): super(MockIncrementalImprovingEstimator, self).__init__(n_max_train_sizes) self.x = None def _is_training_data(self, X): return self.x in X def partial_fit(self, X, y=None, **params): self.train_sizes += X.shape[0] self.x = X[0] class MockEstimatorWithParameter(BaseEstimator): """Dummy classifier to test the validation curve""" def __init__(self, param=0.5): self.X_subset = None self.param = param def fit(self, X_subset, y_subset): self.X_subset = X_subset self.train_sizes = X_subset.shape[0] return self def predict(self, X): raise NotImplementedError def score(self, X=None, y=None): return self.param if self._is_training_data(X) else 1 - self.param def _is_training_data(self, X): return X is self.X_subset # XXX: use 2D array, since 1D X is being detected as a single sample in # check_consistent_length X = np.ones((10, 2)) X_sparse = coo_matrix(X) y = np.arange(10) // 2 def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cross_val_score, clf, X_3d, y) def test_cross_val_score_predict_labels(): # Check if ValueError (when labels is None) propagates to cross_val_score # and cross_val_predict # And also check if labels is correctly passed to the cv object X, y = make_classification(n_samples=20, n_classes=2, random_state=0) clf = SVC(kernel="linear") label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(), LabelShuffleSplit()] for cv in label_cvs: assert_raise_message(ValueError, "The labels parameter should not be None", cross_val_score, estimator=clf, X=X, y=y, cv=cv) assert_raise_message(ValueError, "The labels parameter should not be None", cross_val_predict, estimator=clf, X=X, y=y, cv=cv) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works 
with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target kfold = KFold(5) scores_indices = cross_val_score(svm, X, y, cv=kfold) kfold = KFold(5) cv_masks = [] for train, test in kfold.split(X, y): mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target linear_kernel = np.dot(X, X.T) score_precomputed = cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cross_val_score, BrokenEstimator(), X) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. 
zero / one score) - should be the # same as the default estimator score zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. determination coefficient) - should be the # same as the default estimator score r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error") expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(mse_scores, expected_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = StratifiedKFold(2) score, scores, pvalue = permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = StratifiedKFold(2) score_label, _, pvalue_label = permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_permutation_test_score_allow_nans(): # Check that permutation_test_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) permutation_test_score(p, X, y, cv=5) def test_cross_val_score_allow_nans(): # Check that cross_val_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = 
Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cross_val_score(p, X, y, cv=5) def test_cross_val_score_multilabel(): X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1], [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]]) y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]) clf = KNeighborsClassifier(n_neighbors=1) scoring_micro = make_scorer(precision_score, average='micro') scoring_macro = make_scorer(precision_score, average='macro') scoring_samples = make_scorer(precision_score, average='samples') score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5) score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5) score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5) assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) def test_cross_val_predict(): boston = load_boston() X, y = boston.data, boston.target cv = KFold() est = Ridge() # Naive loop (should be same as cross_val_predict): preds2 = np.zeros_like(y) for train, test in cv.split(X, y): est.fit(X[train], y[train]) preds2[test] = est.predict(X[test]) preds = cross_val_predict(est, X, y, cv=cv) assert_array_almost_equal(preds, preds2) preds = cross_val_predict(est, X, y) assert_equal(len(preds), len(y)) cv = LeaveOneOut() preds = cross_val_predict(est, X, y, cv=cv) assert_equal(len(preds), len(y)) Xsp = X.copy() Xsp *= (Xsp > np.median(Xsp)) Xsp = coo_matrix(Xsp) preds = cross_val_predict(est, Xsp, y) assert_array_almost_equal(len(preds), len(y)) preds = cross_val_predict(KMeans(), X) assert_equal(len(preds), len(y)) class BadCV(): def split(self, X, y=None, labels=None): for i in range(4): yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV()) def test_cross_val_predict_input_types(): clf = Ridge() # Smoke test predictions = cross_val_predict(clf, X, y) assert_equal(predictions.shape, (10,)) # test with multioutput y predictions = cross_val_predict(clf, X_sparse, X) assert_equal(predictions.shape, (10, 2)) predictions = cross_val_predict(clf, X_sparse, y) assert_array_equal(predictions.shape, (10,)) # test with multioutput y predictions = cross_val_predict(clf, X_sparse, X) assert_array_equal(predictions.shape, (10, 2)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) predictions = cross_val_predict(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) predictions = cross_val_predict(clf, X, y.tolist()) # test with 3d X and X_3d = X[:, :, np.newaxis] check_3d = lambda x: x.ndim == 3 clf = CheckingClassifier(check_X=check_3d) predictions = cross_val_predict(clf, X_3d, y) assert_array_equal(predictions.shape, (10,)) def test_cross_val_predict_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cross_val_predict(clf, X_df, y_ser) def 
test_cross_val_score_sparse_fit_params(): iris = load_iris() X, y = iris.data, iris.target clf = MockClassifier() fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))} a = cross_val_score(clf, X, y, fit_params=fit_params) assert_array_equal(a, np.ones(3)) def test_learning_curve(): X, y = make_classification(n_samples=30, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) estimator = MockImprovingEstimator(20) with warnings.catch_warnings(record=True) as w: train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10)) if len(w) > 0: raise RuntimeError("Unexpected warning: %r" % w[0].message) assert_equal(train_scores.shape, (10, 3)) assert_equal(test_scores.shape, (10, 3)) assert_array_equal(train_sizes, np.linspace(2, 20, 10)) assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) def test_learning_curve_unsupervised(): X, _ = make_classification(n_samples=30, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) estimator = MockImprovingEstimator(20) train_sizes, train_scores, test_scores = learning_curve( estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10)) assert_array_equal(train_sizes, np.linspace(2, 20, 10)) assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) def test_learning_curve_verbose(): X, y = make_classification(n_samples=30, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) estimator = MockImprovingEstimator(20) old_stdout = sys.stdout sys.stdout = StringIO() try: train_sizes, train_scores, test_scores = \ learning_curve(estimator, X, y, cv=3, verbose=1) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert("[learning_curve]" in out) def test_learning_curve_incremental_learning_not_possible(): X, y = make_classification(n_samples=2, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) # The mockup does not have partial_fit() estimator = MockImprovingEstimator(1) assert_raises(ValueError, learning_curve, estimator, X, y, exploit_incremental_learning=True) def test_learning_curve_incremental_learning(): X, y = make_classification(n_samples=30, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) estimator = MockIncrementalImprovingEstimator(20) train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=3, exploit_incremental_learning=True, train_sizes=np.linspace(0.1, 1.0, 10)) assert_array_equal(train_sizes, np.linspace(2, 20, 10)) assert_array_almost_equal(train_scores.mean(axis=1), np.linspace(1.9, 1.0, 10)) assert_array_almost_equal(test_scores.mean(axis=1), np.linspace(0.1, 1.0, 10)) def test_learning_curve_incremental_learning_unsupervised(): X, _ = make_classification(n_samples=30, n_features=1, n_informative=1, n_redundant=0, n_classes=2, n_clusters_per_class=1, random_state=0) estimator = MockIncrementalImprovingEstimator(20) train_sizes, train_scores, test_scores = learning_curve( estimator, X, y=None, cv=3, exploit_incremental_learning=True, train_sizes=np.linspace(0.1, 1.0, 10)) assert_array_equal(train_sizes, np.linspace(2, 20, 10)) assert_array_almost_equal(train_scores.mean(axis=1), 
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_learning_curve_batch_and_incremental_learning_are_equal():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)

    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)

    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))


def test_learning_curve_n_sample_range_out_of_bounds():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])


def test_learning_curve_remove_duplicate_sample_sizes():
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])


def test_learning_curve_with_boolean_indices():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))


def test_validation_curve():
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as w:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)

    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)


def test_check_is_permutation():
    p = np.arange(100)
    assert_true(_check_is_permutation(p, 100))
    assert_false(_check_is_permutation(np.delete(p, 23), 100))

    p[0] = 23
    assert_false(_check_is_permutation(p, 100))


def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
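The tests above exercise learning_curve, validation_curve, and cross_val_predict against mock estimators. For orientation, a minimal usage sketch of learning_curve on a real estimator might look like the following; it assumes the 0.16-era sklearn.learning_curve import path that these tests target (in current scikit-learn the function lives in sklearn.model_selection), and the estimator and dataset sizes are illustrative choices, not part of the test suite.

# Minimal sketch (not part of the test module): learning-curve data for a
# real estimator; assumes the 0.16-era import path.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.learning_curve import learning_curve
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
train_sizes, train_scores, test_scores = learning_curve(
    GaussianNB(), X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 5))
print(train_sizes)                  # absolute training-set sizes used
print(train_scores.mean(axis=1))    # mean training score per size
print(test_scores.mean(axis=1))     # mean cross-validation score per size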
bsd-3-clause
adammenges/statsmodels
statsmodels/sandbox/km_class.py
31
11748
#a class for the Kaplan-Meier estimator
from statsmodels.compat.python import range
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt


class KAPLAN_MEIER(object):
    def __init__(self, data, timesIn, groupIn, censoringIn):
        raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
        #store the inputs
        self.data = data
        self.timesIn = timesIn
        self.groupIn = groupIn
        self.censoringIn = censoringIn

    def fit(self):
        #split the data into groups based on the predicting variable
        #get a set of all the groups
        groups = list(set(self.data[:,self.groupIn]))
        #create an empty list to store the data for different groups
        groupList = []
        #create an empty list for each group and add it to groups
        for i in range(len(groups)):
            groupList.append([])
        #iterate through all the groups in groups
        for i in range(len(groups)):
            #iterate through the rows of dataArray
            for j in range(len(self.data)):
                #test if this row has the correct group
                if self.data[j,self.groupIn] == groups[i]:
                    #add the row to groupList
                    groupList[i].append(self.data[j])
        #create an empty list to store the times for each group
        timeList = []
        #iterate through all the groups
        for i in range(len(groupList)):
            #create an empty list
            times = []
            #iterate through all the rows of the group
            for j in range(len(groupList[i])):
                #get a list of all the times in the group
                times.append(groupList[i][j][self.timesIn])
            #get a sorted set of the times and store it in timeList
            times = list(sorted(set(times)))
            timeList.append(times)
        #get a list of the number at risk and events at each time
        #create an empty list to store the results in
        timeCounts = []
        #create an empty list to hold points for plotting
        points = []
        #create a list for points where censoring occurs
        censoredPoints = []
        #iterate through each group
        for i in range(len(groupList)):
            #initialize a variable to estimate the survival function
            survival = 1
            #initialize a variable to estimate the variance of
            #the survival function
            varSum = 0
            #initialize a counter for the number at risk
            riskCounter = len(groupList[i])
            #create a list for the counts for this group
            counts = []
            ##create a list for points to plot
            x = []
            y = []
            #iterate through the list of times
            for j in range(len(timeList[i])):
                if j != 0:
                    if j == 1:
                        #add an indicator to tell if the time
                        #starts a new group
                        groupInd = 1
                        #add (0,1) to the list of points
                        x.append(0)
                        y.append(1)
                        #add the point time to the right of that
                        x.append(timeList[i][j-1])
                        y.append(1)
                        #add the point below that at survival
                        x.append(timeList[i][j-1])
                        y.append(survival)
                        #add the survival to y
                        y.append(survival)
                    else:
                        groupInd = 0
                        #add survival twice to y
                        y.append(survival)
                        y.append(survival)
                        #add the time twice to x
                        x.append(timeList[i][j-1])
                        x.append(timeList[i][j-1])
                    #add each censored time, number of censorings and
                    #its survival to censoredPoints
                    censoredPoints.append([timeList[i][j-1],
                                           censoringNum,survival,groupInd])
                    #add the count to the list
                    counts.append([timeList[i][j-1],riskCounter,
                                   eventCounter,survival,
                                   sqrt(((survival)**2)*varSum)])
                    #increment the number at risk
                    riskCounter += -1*(riskChange)
                #initialize a counter for the change in the number at risk
                riskChange = 0
                #initialize a counter to zero
                eventCounter = 0
                #initialize a counter to tell when censoring occurs
                censoringCounter = 0
                censoringNum = 0
                #iterate through the observations in each group
                for k in range(len(groupList[i])):
                    #check if the observation has the given time
                    if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
                        #increment the number at risk counter
                        riskChange += 1
                        #check if this is an event or censoring
                        if groupList[i][k][self.censoringIn] == 1:
                            #add 1 to the counter
                            eventCounter += 1
                        else:
                            censoringNum += 1
                #check if there are any events at this time
                if eventCounter != censoringCounter:
                    censoringCounter = eventCounter
                    #calculate the estimate of the survival function
                    survival *= ((float(riskCounter) -
                                  eventCounter)/(riskCounter))
                    try:
                        #calculate the estimate of the variance
                        varSum += (eventCounter)/((riskCounter)
                                                  *(float(riskCounter)-
                                                    eventCounter))
                    except ZeroDivisionError:
                        varSum = 0
            #append the last row to counts
            counts.append([timeList[i][len(timeList[i])-1],
                           riskCounter,eventCounter,survival,
                           sqrt(((survival)**2)*varSum)])
            #add the last time once to x
            x.append(timeList[i][len(timeList[i])-1])
            x.append(timeList[i][len(timeList[i])-1])
            #add the last survival twice to y
            y.append(survival)
            #y.append(survival)
            censoredPoints.append([timeList[i][len(timeList[i])-1],
                                   censoringNum,survival,1])
            #add the list for the group to a list for all the groups
            timeCounts.append(np.array(counts))
            points.append([x,y])
        #returns a list of arrays, where each array has as its columns: the time,
        #the number at risk, the number of events, the estimated value of the
        #survival function at that time, and the estimated standard error at
        #that time, in that order
        self.results = timeCounts
        self.points = points
        self.censoredPoints = censoredPoints

    def plot(self):
        x = []
        #iterate through the groups
        for i in range(len(self.points)):
            #plot x and y
            plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
            #create lists of all the x and y values
            x += self.points[i][0]
        for j in range(len(self.censoredPoints)):
            #check if censoring is occurring
            if (self.censoredPoints[j][1] != 0):
                #if this is the first censored point
                if (self.censoredPoints[j][3] == 1) and (j == 0):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this censored point starts a new group
                elif ((self.censoredPoints[j][3] == 1) and
                      (self.censoredPoints[j-1][3] == 1)):
                    #calculate a distance beyond 1 to place it
                    #so all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the censored points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((1+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is the last censored point
                elif j == (len(self.censoredPoints) - 1):
                    #calculate a distance beyond the previous time
                    #so that all the points will fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j][0])))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
                #if this is a point in the middle of the group
                else:
                    #calculate a distance beyond the current time
                    #to place the point, so they all fit
                    dx = ((1./((self.censoredPoints[j][1])+1.))
                          *(float(self.censoredPoints[j+1][0])
                            - self.censoredPoints[j][0]))
                    #iterate through all the points at this time
                    for k in range(self.censoredPoints[j][1]):
                        #plot a vertical line for censoring
                        plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
                                   self.censoredPoints[j][2]-0.03,
                                   self.censoredPoints[j][2]+0.03)
        #set the size of the plot so it extends to the max x and above 1 for y
        plt.xlim((0,np.max(x)))
        plt.ylim((0,1.05))
        #label the axes
        plt.xlabel('time')
        plt.ylabel('survival')
        plt.show()

    def show_results(self):
        #start a string that will be a table of the results
        resultsString = ''
        #iterate through all the groups
        for i in range(len(self.results)):
            #label the group and header
            resultsString += ('Group {0}\n\n'.format(i) +
                              'Time     At Risk     Events     Survival     Std. Err\n')
            for j in self.results[i]:
                #add the results to the string
                resultsString += (
                    '{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
                        int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
        print(resultsString)
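The class above can no longer be used directly: its constructor raises RuntimeError and points to the newer implementation in survival2.py. For reference, the product-limit update that the comments in fit() describe (the survival estimate multiplied by (n_at_risk - events) / n_at_risk at each event time, with Greenwood's formula for the variance) can be sketched on its own; the toy times and event indicators below are invented for illustration and are not part of the module.

# Standalone sketch of the Kaplan-Meier product-limit estimate described in
# fit() above; data are made up for illustration only.
import numpy as np
from math import sqrt

times = np.array([2, 3, 3, 5, 8, 8, 9])    # observation times
events = np.array([1, 1, 0, 1, 1, 1, 0])   # 1 = event, 0 = censored

survival, var_sum = 1.0, 0.0
for t in np.unique(times[events == 1]):    # distinct event times
    n_at_risk = np.sum(times >= t)         # still under observation at t
    n_events = np.sum((times == t) & (events == 1))
    survival *= (n_at_risk - n_events) / float(n_at_risk)
    var_sum += n_events / float(n_at_risk * (n_at_risk - n_events))
    # time, at risk, events, survival estimate, Greenwood std. error
    print(t, n_at_risk, n_events, round(survival, 4),
          round(survival * sqrt(var_sum), 4))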
bsd-3-clause
datapythonista/pandas
pandas/core/arrays/__init__.py
3
1129
from pandas.core.arrays.base import (
    ExtensionArray,
    ExtensionOpsMixin,
    ExtensionScalarOpsMixin,
)
from pandas.core.arrays.boolean import BooleanArray
from pandas.core.arrays.categorical import Categorical
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.floating import FloatingArray
from pandas.core.arrays.integer import IntegerArray
from pandas.core.arrays.interval import IntervalArray
from pandas.core.arrays.masked import BaseMaskedArray
from pandas.core.arrays.numpy_ import PandasArray
from pandas.core.arrays.period import (
    PeriodArray,
    period_array,
)
from pandas.core.arrays.sparse import SparseArray
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.timedeltas import TimedeltaArray

__all__ = [
    "ExtensionArray",
    "ExtensionOpsMixin",
    "ExtensionScalarOpsMixin",
    "BaseMaskedArray",
    "BooleanArray",
    "Categorical",
    "DatetimeArray",
    "FloatingArray",
    "IntegerArray",
    "IntervalArray",
    "PandasArray",
    "PeriodArray",
    "period_array",
    "SparseArray",
    "StringArray",
    "TimedeltaArray",
]
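This __init__ module only re-exports the extension-array types that live under pandas.core.arrays. In user code these arrays are normally reached through the public constructors rather than imported from the private pandas.core namespace; a minimal sketch of that usage (pandas 1.x API assumed, values chosen arbitrarily):

# Sketch: constructing a few of the re-exported array types via public APIs.
import pandas as pd

int_arr = pd.array([1, 2, None], dtype="Int64")        # IntegerArray
str_arr = pd.array(["a", None, "c"], dtype="string")   # StringArray
cat = pd.Categorical(["low", "high", "low"])           # Categorical
print(type(int_arr).__name__, type(str_arr).__name__, type(cat).__name__)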
bsd-3-clause