Dataset schema (one record per source file):
repo_name: string (lengths 6-112)
path: string (lengths 4-204)
copies: string (lengths 1-3)
size: string (lengths 4-6)
content: string (lengths 714-810k)
license: string (15 classes)
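The six fields above describe one record per source file. A minimal sketch of iterating such records, assuming the dump has been exported to a Parquet file (the file name below is hypothetical):

import pandas as pd

# Hypothetical export of this dump; the column names follow the schema above.
df = pd.read_parquet("code_dump.parquet",
                     columns=["repo_name", "path", "copies", "size",
                              "content", "license"])

for _, row in df.iterrows():
    # "size" and "copies" are stored as strings in this schema.
    n_bytes = int(row["size"])
    print(row["repo_name"], row["path"], row["license"], n_bytes, "bytes")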
bhargav/scikit-learn
sklearn/tree/tests/test_tree.py
32
52369
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.validation import check_random_state from sklearn.exceptions import NotFittedError from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.utils import compute_sample_weight CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, presort=True), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, presort=True), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor"] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 
2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. 
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = assert_warns( DeprecationWarning, clf.transform, X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. 
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_split(): """Test min_samples_split parameter""" X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test for integer parameter est = TreeEstimator(min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) # test for float parameter est = TreeEstimator(min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) # count samples on nodes, -1 means it is a leaf node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1] assert_greater(np.min(node_samples), 9, "Failed with {0}".format(name)) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()): TreeEstimator = ALL_TREES[name] # test integer parameter est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) # test float parameter est = TreeEstimator(min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] 
assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): for name, TreeEstimator in ALL_TREES.items(): if "Classifier" in name: X, y = iris.data, iris.target else: X, y = boston.data, boston.target est = TreeEstimator(random_state=0) est.fit(X, y) score = est.score(X, y) fitted_attribute = dict() for attribute in ["max_depth", "node_count", "capacity"]: fitted_attribute[attribute] = getattr(est.tree_, attribute) serialized_object = pickle.dumps(est) est2 = pickle.loads(serialized_object) assert_equal(type(est2), est.__class__) score2 = est2.score(X, y) assert_equal(score, score2, "Failed to generate same score after pickling " "with {0}".format(name)) for attribute in fitted_attribute: assert_equal(getattr(est2.tree_, attribute), fitted_attribute[attribute], "Failed to generate same attribute {0} after " "pickling with {1}".format(attribute, name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. 
unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = compute_sample_weight("balanced", unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if not est.presort: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. # Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. 
X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in CLF_TREES: yield check_class_weights, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in CLF_TREES: yield check_class_weight_errors, name def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leafs. 
from sklearn.tree._tree import TREE_LEAF X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) tree = est.tree_ assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1) # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test precedence of max_leaf_nodes over max_depth. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-3 <= value.flat[0] < 3, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.tree_.decision_path(X1).toarray(), d.tree_.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), d.decision_path(X2).toarray()) assert_array_almost_equal(s.decision_path(X1).toarray(), s.tree_.decision_path(X1).toarray()) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, 
sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if not TreeEstimator().presort: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name) def check_presort_sparse(est, X, y): assert_raises(ValueError, est.fit, X, y) def test_presort_sparse(): ests = (DecisionTreeClassifier(presort=True), DecisionTreeRegressor(presort=True)) sparse_matrices = (csr_matrix, csc_matrix, coo_matrix) y, X = datasets.make_multilabel_classification(random_state=0, n_samples=50, n_features=1, n_classes=20) y = y[:, 0] for est, sparse_matrix in product(ests, sparse_matrices): yield check_presort_sparse, est, sparse_matrix(X), y def test_decision_path_hardcoded(): X = iris.data y = iris.target est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y) node_indicator = est.decision_path(X[:2]).toarray() assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]]) def check_decision_path(name): X = iris.data y = iris.target n_samples = X.shape[0] TreeEstimator = ALL_TREES[name] est = TreeEstimator(random_state=0, max_depth=2) est.fit(X, y) node_indicator_csr = est.decision_path(X) node_indicator = node_indicator_csr.toarray() assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count)) # Assert that leaves index are correct leaves = est.apply(X) leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) # Ensure only one leave node per sample all_leaves = est.tree_.children_left == TREE_LEAF assert_array_almost_equal(np.dot(node_indicator, all_leaves), np.ones(shape=n_samples)) # Ensure max depth is consistent with sum of indicator max_depth = node_indicator.sum(axis=1).max() assert_less_equal(est.tree_.max_depth, max_depth) def test_decision_path(): for name in ALL_TREES: yield (check_decision_path, name) def check_no_sparse_y_support(name): X, y = X_multilabel, csr_matrix(y_multilabel) TreeEstimator = ALL_TREES[name] assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y) def test_no_sparse_y_support(): # Currently we don't support sparse y for name in ALL_TREES: yield (check_no_sparse_y_support, name)
bsd-3-clause
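The test suite above repeatedly verifies that fitting on dense and sparse inputs produces identical trees (check_sparse_input, check_explicit_sparse_zeros). A minimal standalone sketch of that pattern, assuming a scikit-learn installation where DecisionTreeClassifier accepts sparse CSC input:

import numpy as np
from scipy.sparse import csc_matrix
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)

# Fit one tree on the dense array and one on a CSC copy of the same data.
dense_clf = DecisionTreeClassifier(random_state=0).fit(X, y)
sparse_clf = DecisionTreeClassifier(random_state=0).fit(csc_matrix(X), y)

# With a fixed random_state the two fits should predict identically.
assert np.array_equal(dense_clf.predict(X), sparse_clf.predict(X))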
johannfaouzi/pyts
examples/transformation/plot_boss.py
1
1763
""" ========================= Bag-of-SFA Symbols (BOSS) ========================= Bag-of-words approaches are common in time series classification. The Bag-of-SFA Symbols (BOSS) algorithm extracts words from time series and builds features representing frequencies of each word for each time series. This example illustrates the words and the frequencies of these words that have been learned by this algorithm. It is implemented as :class:`pyts.transformation.BOSS`. """ # Author: Johann Faouzi <[email protected]> # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt from pyts.datasets import load_gunpoint from pyts.transformation import BOSS # Toy dataset X_train, _, y_train, _ = load_gunpoint(return_X_y=True) # BOSS transformation boss = BOSS(word_size=2, n_bins=4, window_size=12, sparse=False) X_boss = boss.fit_transform(X_train) # Visualize the transformation for the first time series plt.figure(figsize=(6, 4)) vocabulary_length = len(boss.vocabulary_) width = 0.3 plt.bar(np.arange(vocabulary_length) - width / 2, X_boss[y_train == 1][0], width=width, label='First time series in class 1') plt.bar(np.arange(vocabulary_length) + width / 2, X_boss[y_train == 2][0], width=width, label='First time series in class 2') plt.xticks(np.arange(vocabulary_length), np.vectorize(boss.vocabulary_.get)(np.arange(X_boss[0].size)), fontsize=12) y_max = np.max(np.concatenate([X_boss[y_train == 1][0], X_boss[y_train == 2][0]])) plt.yticks(np.arange(y_max + 1), fontsize=12) plt.xlabel("Words", fontsize=14) plt.ylabel("Frequencies", fontsize=14) plt.title("BOSS transformation", fontsize=16) plt.legend(loc='best', fontsize=10) plt.tight_layout() plt.show()
bsd-3-clause
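The word-frequency features produced by BOSS are typically passed to a distance-based classifier; the example above stops at visualization. A minimal sketch of that next step, assuming the same pyts toy dataset; the 1-nearest-neighbor classifier is an illustrative choice, not part of the original example:

from pyts.datasets import load_gunpoint
from pyts.transformation import BOSS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline

X_train, X_test, y_train, y_test = load_gunpoint(return_X_y=True)

# Chain the BOSS word-frequency features with a 1-nearest-neighbor classifier.
clf = make_pipeline(
    BOSS(word_size=2, n_bins=4, window_size=12, sparse=False),
    KNeighborsClassifier(n_neighbors=1),
)
clf.fit(X_train, y_train)
print("Test accuracy:", clf.score(X_test, y_test))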
Jimmy-Morzaria/scikit-learn
sklearn/utils/mocking.py
38
1807
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_true


class ArraySlicingWrapper(object):
    def __init__(self, array):
        self.array = array

    def __getitem__(self, aslice):
        return MockDataFrame(self.array[aslice])


class MockDataFrame(object):
    # has shape and length but doesn't support indexing.
    def __init__(self, array):
        self.array = array
        self.shape = array.shape
        self.ndim = array.ndim
        # ugly hack to make iloc work.
        self.iloc = ArraySlicingWrapper(array)

    def __len__(self):
        return len(self.array)

    def __array__(self):
        # Pandas data frames also are array-like: we want to make sure that
        # input validation in cross-validation does not try to call that
        # method.
        return self.array


class CheckingClassifier(BaseEstimator):
    """Dummy classifier to test pipelining and meta-estimators.

    Checks some property of X and y in fit / predict. This allows testing
    whether pipelines / cross-validation or meta-estimators changed the input.
    """
    def __init__(self, check_y=None, check_X=None, foo_param=0):
        self.check_y = check_y
        self.check_X = check_X
        self.foo_param = foo_param

    def fit(self, X, y):
        assert_true(len(X) == len(y))
        if self.check_X is not None:
            assert_true(self.check_X(X))
        if self.check_y is not None:
            assert_true(self.check_y(y))
        return self

    def predict(self, T):
        if self.check_X is not None:
            assert_true(self.check_X(T))
        return T.shape[0]

    def score(self, X=None, Y=None):
        if self.foo_param > 1:
            score = 1.
        else:
            score = 0.
        return score
bsd-3-clause
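A minimal sketch of how CheckingClassifier might be exercised inside a pipeline, assuming the module is importable as sklearn.utils.mocking (matching the file path of this record); the feature-count and label checks are illustrative:

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils.mocking import CheckingClassifier  # path per this record's sklearn version

X = np.random.RandomState(0).rand(20, 4)
y = np.arange(20) % 2

# The checks run inside fit/predict, so a failing lambda surfaces as an
# assertion error raised by the mock classifier.
clf = CheckingClassifier(check_X=lambda X_in: X_in.shape[1] == 4,
                         check_y=lambda y_in: set(y_in) == {0, 1})
pipe = Pipeline([("scale", StandardScaler()), ("mock", clf)])
pipe.fit(X, y)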
alekz112/statsmodels
statsmodels/sandbox/infotheo.py
33
16417
""" Information Theoretic and Entropy Measures References ---------- Golan, As. 2008. "Information and Entropy Econometrics -- A Review and Synthesis." Foundations And Trends in Econometrics 2(1-2), 1-145. Golan, A., Judge, G., and Miller, D. 1996. Maximum Entropy Econometrics. Wiley & Sons, Chichester. """ #For MillerMadow correction #Miller, G. 1955. Note on the bias of information estimates. Info. Theory # Psychol. Prob. Methods II-B:95-100. #For ChaoShen method #Chao, A., and T.-J. Shen. 2003. Nonparametric estimation of Shannon's index of diversity when #there are unseen species in sample. Environ. Ecol. Stat. 10:429-443. #Good, I. J. 1953. The population frequencies of species and the estimation of population parameters. #Biometrika 40:237-264. #Horvitz, D.G., and D. J. Thompson. 1952. A generalization of sampling without replacement from a finute universe. J. Am. Stat. Assoc. 47:663-685. #For NSB method #Nemenman, I., F. Shafee, and W. Bialek. 2002. Entropy and inference, revisited. In: Dietterich, T., #S. Becker, Z. Gharamani, eds. Advances in Neural Information Processing Systems 14: 471-478. #Cambridge (Massachusetts): MIT Press. #For shrinkage method #Dougherty, J., Kohavi, R., and Sahami, M. (1995). Supervised and unsupervised discretization of #continuous features. In International Conference on Machine Learning. #Yang, Y. and Webb, G. I. (2003). Discretization for naive-bayes learning: managing discretization #bias and variance. Technical Report 2003/131 School of Computer Science and Software Engineer- #ing, Monash University. from statsmodels.compat.python import range, lzip, lmap from scipy import stats import numpy as np from matplotlib import pyplot as plt from scipy.misc import logsumexp as sp_logsumexp #TODO: change these to use maxentutils so that over/underflow is handled #with the logsumexp. def logsumexp(a, axis=None): """ Compute the log of the sum of exponentials log(e^{a_1}+...e^{a_n}) of a Avoids numerical overflow. Parameters ---------- a : array-like The vector to exponentiate and sum axis : int, optional The axis along which to apply the operation. Defaults is None. Returns ------- sum(log(exp(a))) Notes ----- This function was taken from the mailing list http://mail.scipy.org/pipermail/scipy-user/2009-October/022931.html This should be superceded by the ufunc when it is finished. """ if axis is None: # Use the scipy.maxentropy version. return sp_logsumexp(a) a = np.asarray(a) shp = list(a.shape) shp[axis] = 1 a_max = a.max(axis=axis) s = np.log(np.exp(a - a_max.reshape(shp)).sum(axis=axis)) lse = a_max + s return lse def _isproperdist(X): """ Checks to see if `X` is a proper probability distribution """ X = np.asarray(X) if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1): return False else: return True def discretize(X, method="ef", nbins=None): """ Discretize `X` Parameters ---------- bins : int, optional Number of bins. 
Default is floor(sqrt(N)) method : string "ef" is equal-frequency binning "ew" is equal-width binning Examples -------- """ nobs = len(X) if nbins == None: nbins = np.floor(np.sqrt(nobs)) if method == "ef": discrete = np.ceil(nbins * stats.rankdata(X)/nobs) if method == "ew": width = np.max(X) - np.min(X) width = np.floor(width/nbins) svec, ivec = stats.fastsort(X) discrete = np.zeros(nobs) binnum = 1 base = svec[0] discrete[ivec[0]] = binnum for i in range(1,nobs): if svec[i] < base + width: discrete[ivec[i]] = binnum else: base = svec[i] binnum += 1 discrete[ivec[i]] = binnum return discrete #TODO: looks okay but needs more robust tests for corner cases def logbasechange(a,b): """ There is a one-to-one transformation of the entropy value from a log base b to a log base a : H_{b}(X)=log_{b}(a)[H_{a}(X)] Returns ------- log_{b}(a) """ return np.log(b)/np.log(a) def natstobits(X): """ Converts from nats to bits """ return logbasechange(np.e, 2) * X def bitstonats(X): """ Converts from bits to nats """ return logbasechange(2, np.e) * X #TODO: make this entropy, and then have different measures as #a method def shannonentropy(px, logbase=2): """ This is Shannon's entropy Parameters ----------- logbase, int or np.e The base of the log px : 1d or 2d array_like Can be a discrete probability distribution, a 2d joint distribution, or a sequence of probabilities. Returns ----- For log base 2 (bits) given a discrete distribution H(p) = sum(px * log2(1/px) = -sum(pk*log2(px)) = E[log2(1/p(X))] For log base 2 (bits) given a joint distribution H(px,py) = -sum_{k,j}*w_{kj}log2(w_{kj}) Notes ----- shannonentropy(0) is defined as 0 """ #TODO: haven't defined the px,py case? px = np.asarray(px) if not np.all(px <= 1) or not np.all(px >= 0): raise ValueError("px does not define proper distribution") entropy = -np.sum(np.nan_to_num(px*np.log2(px))) if logbase != 2: return logbasechange(2,logbase) * entropy else: return entropy # Shannon's information content def shannoninfo(px, logbase=2): """ Shannon's information Parameters ---------- px : float or array-like `px` is a discrete probability distribution Returns ------- For logbase = 2 np.log2(px) """ px = np.asarray(px) if not np.all(px <= 1) or not np.all(px >= 0): raise ValueError("px does not define proper distribution") if logbase != 2: return - logbasechange(2,logbase) * np.log2(px) else: return - np.log2(px) def condentropy(px, py, pxpy=None, logbase=2): """ Return the conditional entropy of X given Y. Parameters ---------- px : array-like py : array-like pxpy : array-like, optional If pxpy is None, the distributions are assumed to be independent and conendtropy(px,py) = shannonentropy(px) logbase : int or np.e Returns ------- sum_{kj}log(q_{j}/w_{kj} where q_{j} = Y[j] and w_kj = X[k,j] """ if not _isproperdist(px) or not _isproperdist(py): raise ValueError("px or py is not a proper probability distribution") if pxpy != None and not _isproperdist(pxpy): raise ValueError("pxpy is not a proper joint distribtion") if pxpy == None: pxpy = np.outer(py,px) condent = np.sum(pxpy * np.nan_to_num(np.log2(py/pxpy))) if logbase == 2: return condent else: return logbasechange(2, logbase) * condent def mutualinfo(px,py,pxpy, logbase=2): """ Returns the mutual information between X and Y. Parameters ---------- px : array-like Discrete probability distribution of random variable X py : array-like Discrete probability distribution of random variable Y pxpy : 2d array-like The joint probability distribution of random variables X and Y. 
Note that if X and Y are independent then the mutual information is zero. logbase : int or np.e, optional Default is 2 (bits) Returns ------- shannonentropy(px) - condentropy(px,py,pxpy) """ if not _isproperdist(px) or not _isproperdist(py): raise ValueError("px or py is not a proper probability distribution") if pxpy != None and not _isproperdist(pxpy): raise ValueError("pxpy is not a proper joint distribtion") if pxpy == None: pxpy = np.outer(py,px) return shannonentropy(px, logbase=logbase) - condentropy(px,py,pxpy, logbase=logbase) def corrent(px,py,pxpy,logbase=2): """ An information theoretic correlation measure. Reflects linear and nonlinear correlation between two random variables X and Y, characterized by the discrete probability distributions px and py respectively. Parameters ---------- px : array-like Discrete probability distribution of random variable X py : array-like Discrete probability distribution of random variable Y pxpy : 2d array-like, optional Joint probability distribution of X and Y. If pxpy is None, X and Y are assumed to be independent. logbase : int or np.e, optional Default is 2 (bits) Returns ------- mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py,logbase=logbase) Notes ----- This is also equivalent to corrent(px,py,pxpy) = 1 - condent(px,py,pxpy)/shannonentropy(py) """ if not _isproperdist(px) or not _isproperdist(py): raise ValueError("px or py is not a proper probability distribution") if pxpy != None and not _isproperdist(pxpy): raise ValueError("pxpy is not a proper joint distribtion") if pxpy == None: pxpy = np.outer(py,px) return mutualinfo(px,py,pxpy,logbase=logbase)/shannonentropy(py, logbase=logbase) def covent(px,py,pxpy,logbase=2): """ An information theoretic covariance measure. Reflects linear and nonlinear correlation between two random variables X and Y, characterized by the discrete probability distributions px and py respectively. Parameters ---------- px : array-like Discrete probability distribution of random variable X py : array-like Discrete probability distribution of random variable Y pxpy : 2d array-like, optional Joint probability distribution of X and Y. If pxpy is None, X and Y are assumed to be independent. logbase : int or np.e, optional Default is 2 (bits) Returns ------- condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy, logbase=logbase) Notes ----- This is also equivalent to covent(px,py,pxpy) = condent(px,py,pxpy) + condent(py,px,pxpy) """ if not _isproperdist(px) or not _isproperdist(py): raise ValueError("px or py is not a proper probability distribution") if pxpy != None and not _isproperdist(pxpy): raise ValueError("pxpy is not a proper joint distribtion") if pxpy == None: pxpy = np.outer(py,px) return condent(px,py,pxpy,logbase=logbase) + condent(py,px,pxpy, logbase=logbase) #### Generalized Entropies #### def renyientropy(px,alpha=1,logbase=2,measure='R'): """ Renyi's generalized entropy Parameters ---------- px : array-like Discrete probability distribution of random variable X. Note that px is assumed to be a proper probability distribution. logbase : int or np.e, optional Default is 2 (bits) alpha : float or inf The order of the entropy. The default is 1, which in the limit is just Shannon's entropy. 2 is Renyi (Collision) entropy. If the string "inf" or numpy.inf is specified the min-entropy is returned. measure : str, optional The type of entropy measure desired. 'R' returns Renyi entropy measure. 'T' returns the Tsallis entropy measure. 
Returns ------- 1/(1-alpha)*log(sum(px**alpha)) In the limit as alpha -> 1, Shannon's entropy is returned. In the limit as alpha -> inf, min-entropy is returned. """ #TODO:finish returns #TODO:add checks for measure if not _isproperdist(px): raise ValueError("px is not a proper probability distribution") alpha = float(alpha) if alpha == 1: genent = shannonentropy(px) if logbase != 2: return logbasechange(2, logbase) * genent return genent elif 'inf' in string(alpha).lower() or alpha == np.inf: return -np.log(np.max(px)) # gets here if alpha != (1 or inf) px = px**alpha genent = np.log(px.sum()) if logbase == 2: return 1/(1-alpha) * genent else: return 1/(1-alpha) * logbasechange(2, logbase) * genent #TODO: before completing this, need to rethink the organization of # (relative) entropy measures, ie., all put into one function # and have kwdargs, etc.? def gencrossentropy(px,py,pxpy,alpha=1,logbase=2, measure='T'): """ Generalized cross-entropy measures. Parameters ---------- px : array-like Discrete probability distribution of random variable X py : array-like Discrete probability distribution of random variable Y pxpy : 2d array-like, optional Joint probability distribution of X and Y. If pxpy is None, X and Y are assumed to be independent. logbase : int or np.e, optional Default is 2 (bits) measure : str, optional The measure is the type of generalized cross-entropy desired. 'T' is the cross-entropy version of the Tsallis measure. 'CR' is Cressie-Read measure. """ if __name__ == "__main__": print("From Golan (2008) \"Information and Entropy Econometrics -- A Review \ and Synthesis") print("Table 3.1") # Examples from Golan (2008) X = [.2,.2,.2,.2,.2] Y = [.322,.072,.511,.091,.004] for i in X: print(shannoninfo(i)) for i in Y: print(shannoninfo(i)) print(shannonentropy(X)) print(shannonentropy(Y)) p = [1e-5,1e-4,.001,.01,.1,.15,.2,.25,.3,.35,.4,.45,.5] plt.subplot(111) plt.ylabel("Information") plt.xlabel("Probability") x = np.linspace(0,1,100001) plt.plot(x, shannoninfo(x)) # plt.show() plt.subplot(111) plt.ylabel("Entropy") plt.xlabel("Probability") x = np.linspace(0,1,101) plt.plot(x, lmap(shannonentropy, lzip(x,1-x))) # plt.show() # define a joint probability distribution # from Golan (2008) table 3.3 w = np.array([[0,0,1./3],[1/9.,1/9.,1/9.],[1/18.,1/9.,1/6.]]) # table 3.4 px = w.sum(0) py = w.sum(1) H_X = shannonentropy(px) H_Y = shannonentropy(py) H_XY = shannonentropy(w) H_XgivenY = condentropy(px,py,w) H_YgivenX = condentropy(py,px,w) # note that cross-entropy is not a distance measure as the following shows D_YX = logbasechange(2,np.e)*stats.entropy(px, py) D_XY = logbasechange(2,np.e)*stats.entropy(py, px) I_XY = mutualinfo(px,py,w) print("Table 3.3") print(H_X,H_Y, H_XY, H_XgivenY, H_YgivenX, D_YX, D_XY, I_XY) print("discretize functions") X=np.array([21.2,44.5,31.0,19.5,40.6,38.7,11.1,15.8,31.9,25.8,20.2,14.2, 24.0,21.0,11.3,18.0,16.3,22.2,7.8,27.8,16.3,35.1,14.9,17.1,28.2,16.4, 16.5,46.0,9.5,18.8,32.1,26.1,16.1,7.3,21.4,20.0,29.3,14.9,8.3,22.5, 12.8,26.9,25.5,22.9,11.2,20.7,26.2,9.3,10.8,15.6]) discX = discretize(X) #CF: R's infotheo #TODO: compare to pyentropy quantize? 
print print("Example in section 3.6 of Golan, using table 3.3") print("Bounding errors using Fano's inequality") print("H(P_{e}) + P_{e}log(K-1) >= H(X|Y)") print("or, a weaker inequality") print("P_{e} >= [H(X|Y) - 1]/log(K)") print("P(x) = %s" % px) print("X = 3 has the highest probability, so this is the estimate Xhat") pe = 1 - px[2] print("The probability of error Pe is 1 - p(X=3) = %0.4g" % pe) H_pe = shannonentropy([pe,1-pe]) print("H(Pe) = %0.4g and K=3" % H_pe) print("H(Pe) + Pe*log(K-1) = %0.4g >= H(X|Y) = %0.4g" % \ (H_pe+pe*np.log2(2), H_XgivenY)) print("or using the weaker inequality") print("Pe = %0.4g >= [H(X) - 1]/log(K) = %0.4g" % (pe, (H_X - 1)/np.log2(3))) print("Consider now, table 3.5, where there is additional information") print("The conditional probabilities of P(X|Y=y) are ") w2 = np.array([[0.,0.,1.],[1/3.,1/3.,1/3.],[1/6.,1/3.,1/2.]]) print(w2) # not a proper distribution? print("The probability of error given this information is") print("Pe = [H(X|Y) -1]/log(K) = %0.4g" % ((np.mean([0,shannonentropy(w2[1]),shannonentropy(w2[2])])-1)/np.log2(3))) print("such that more information lowers the error") ### Stochastic processes markovchain = np.array([[.553,.284,.163],[.465,.312,.223],[.420,.322,.258]])
bsd-3-clause
aminert/scikit-learn
examples/applications/plot_prediction_latency.py
234
11277
""" ================== Prediction Latency ================== This is an example showing the prediction latency of various scikit-learn estimators. The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode. The plots represent the distribution of the prediction latency as a boxplot. """ # Authors: Eustache Diemert <[email protected]> # License: BSD 3 clause from __future__ import print_function from collections import defaultdict import time import gc import numpy as np import matplotlib.pyplot as plt from scipy.stats import scoreatpercentile from sklearn.datasets.samples_generator import make_regression from sklearn.ensemble.forest import RandomForestRegressor from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.stochastic_gradient import SGDRegressor from sklearn.svm.classes import SVR def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=np.float) for i in range(n_instances): instance = X_test[i, :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=np.float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. """ atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression(n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] idx = np.arange(n_train) np.random.seed(13) np.random.shuffle(idx) X_train = X_train[idx] y_train = y_train[idx] std = X_train.std(axis=0) mean = X_train.mean(axis=0) X_train = (X_train - mean) / std X_test = (X_test - mean) / std std = y_train.std(axis=0) mean = y_train.mean(axis=0) y_train = (y_train - mean) / std y_test = (y_test - mean) / std gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test def boxplot_runtimes(runtimes, pred_type, configuration): """ Plot a new `Figure` with boxplots of prediction runtimes. 
Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds cls_names : list of estimator class names that generated the runtimes pred_type : 'bulk' or 'atomic' """ fig, ax1 = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes, ) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ( pred_type.capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() def benchmark(configuration): """Run the whole benchmark.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) stats = {} for estimator_conf in configuration['estimators']: print("Benchmarking", estimator_conf['instance']) estimator_conf['instance'].fit(X_train, y_train) gc.collect() a, b = benchmark_estimator(estimator_conf['instance'], X_test) stats[estimator_conf['name']] = {'atomic': a, 'bulk': b} cls_names = [estimator_conf['name'] for estimator_conf in configuration[ 'estimators']] runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'atomic', configuration) runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'], configuration) def n_feature_influence(estimators, n_train, n_test, n_features, percentile): """ Estimate influence of the number of features on prediction time. 
Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : nber of training instances (int) n_test : nber of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns: -------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us)) """ percentiles = defaultdict(defaultdict) for n in n_features: print("benchmarking with %d features" % n) X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n) for cls_name, estimator in estimators.items(): estimator.fit(X_train, y_train) gc.collect() runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False) percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes, percentile) return percentiles def plot_n_features_influence(percentiles, percentile): fig, ax1 = plt.subplots(figsize=(10, 6)) colors = ['r', 'g', 'b'] for i, cls_name in enumerate(percentiles.keys()): x = np.array(sorted([n for n in percentiles[cls_name].keys()])) y = np.array([percentiles[cls_name][n] for n in x]) plt.plot(x, y, color=colors[i], ) ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Evolution of Prediction Time with #Features') ax1.set_xlabel('#Features') ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile) plt.show() def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) throughputs = dict() for estimator_config in configuration['estimators']: estimator_config['instance'].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while (time.time() - start_time) < duration_secs: estimator_config['instance'].predict(X_test[0]) n_predictions += 1 throughputs[estimator_config['name']] = n_predictions / duration_secs return throughputs def plot_benchmark_throughput(throughputs, configuration): fig, ax = plt.subplots(figsize=(10, 6)) colors = ['r', 'g', 'b'] cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] cls_values = [throughputs[estimator_conf['name']] for estimator_conf in configuration['estimators']] plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors) ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs))) ax.set_xticklabels(cls_infos, fontsize=10) ymax = max(cls_values) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('Throughput (predictions/sec)') ax.set_title('Prediction Throughput for different estimators (%d ' 'features)' % configuration['n_features']) plt.show() ############################################################################### # main code start_time = time.time() # benchmark bulk/atomic prediction speed for various regressors configuration = { 'n_train': int(1e3), 'n_test': int(1e2), 'n_features': int(1e2), 'estimators': [ {'name': 'Linear Model', 'instance': SGDRegressor(penalty='elasticnet', alpha=0.01, l1_ratio=0.25, fit_intercept=True), 'complexity_label': 'non-zero coefficients', 'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)}, {'name': 'RandomForest', 'instance': RandomForestRegressor(), 'complexity_label': 'estimators', 'complexity_computer': lambda clf: clf.n_estimators}, {'name': 'SVR', 
'instance': SVR(kernel='rbf'), 'complexity_label': 'support vectors', 'complexity_computer': lambda clf: len(clf.support_vectors_)}, ] } benchmark(configuration) # benchmark n_features influence on prediction speed percentile = 90 percentiles = n_feature_influence({'ridge': Ridge()}, configuration['n_train'], configuration['n_test'], [100, 250, 500], percentile) plot_n_features_influence(percentiles, percentile) # benchmark throughput throughputs = benchmark_throughputs(configuration) plot_benchmark_throughput(throughputs, configuration) stop_time = time.time() print("example run in %.2fs" % (stop_time - start_time))
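# A minimal sketch (not part of the original example) of how one more estimator
# could be added to the benchmark configuration above; each entry only needs a
# display name, an estimator instance, and a callable that reports its
# "complexity" once fitted:
#
#   configuration['estimators'].append({
#       'name': 'Ridge',
#       'instance': Ridge(alpha=1.0),
#       'complexity_label': 'non-zero coefficients',
#       'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)})
#   benchmark(configuration)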
bsd-3-clause
arnoldlu/lisa
tests/eas/load_tracking.py
4
12770
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2016, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from bart.common.Utils import select_window, area_under_curve from devlib.utils.misc import memoized from trappy.stats.grammar import Parser import pandas as pd from test import LisaTest, experiment_test UTIL_SCALE = 1024 # Time in seconds to allow for util_avg to converge (i.e. ignored time) UTIL_AVG_CONVERGENCE_TIME = 0.3 # Allowed margin between expected and observed util_avg value ERROR_MARGIN_PCT = 15 class _LoadTrackingBase(LisaTest): """Base class for shared functionality of load tracking tests""" test_conf = { 'tools' : [ 'rt-app' ], 'ftrace' : { 'events' : [ 'sched_switch', 'sched_load_avg_task', 'sched_load_avg_cpu', 'sched_pelt_se', 'sched_load_se' ], }, # cgroups required by freeze_userspace flag 'modules': ['cpufreq', 'cgroups'], } @memoized @staticmethod def _get_cpu_capacity(test_env, cpu): if test_env.nrg_model: return test_env.nrg_model.get_cpu_capacity(cpu) return test_env.target.read_int( '/sys/devices/system/cpu/cpu{}/cpu_capacity'.format(cpu)) @classmethod def setUpClass(cls, *args, **kwargs): super(_LoadTrackingBase, cls).runExperiments(*args, **kwargs) @classmethod def get_wload(cls, cpu): """ Get a specification for a 10% rt-app workload, pinned to the given CPU """ return { 'type' : 'rt-app', 'conf' : { 'class' : 'periodic', 'params' : { 'duty_cycle_pct': 10, 'duration_s': 1, 'period_ms': 16, }, 'tasks' : 1, 'prefix' : 'lt_test', 'cpus' : [cpu] }, } def get_expected_util_avg(self, experiment): """ Examine trace to figure out an expected mean for util_avg Assumes an RT-App workload with a single task with a single phase """ # Find duty cycle of the experiment's workload task [task] = experiment.wload.tasks.keys() sched_assert = self.get_sched_assert(experiment, task) duty_cycle_pct = sched_assert.getDutyCycle(self.get_window(experiment)) # Find the (max) capacity of the CPU the workload was run on [cpu] = experiment.wload.cpus cpu_capacity = self._get_cpu_capacity(self.te, cpu) # Scale the capacity linearly according to the frequency the workload # was run at cpufreq = experiment.conf['cpufreq'] if cpufreq['governor'] == 'userspace': freq = cpufreq['freqs'][cpu] max_freq = max(self.te.target.cpufreq.list_frequencies(cpu)) cpu_capacity *= float(freq) / max_freq else: assert cpufreq['governor'] == 'performance' # Scale the relative CPU/freq capacity into the range 0..1 scale = max(self._get_cpu_capacity(self.te, cpu) for cpu in range(self.te.target.number_of_cpus)) scaling_factor = float(cpu_capacity) / scale return UTIL_SCALE * (duty_cycle_pct / 100.) * scaling_factor def get_sched_task_signals(self, experiment, signals): """ Get a pandas.DataFrame with the sched signals for the workload task This examines scheduler load tracking trace events, supporting either sched_load_avg_task or sched_pelt_se. You will need a target kernel that includes these events. 
:param experiment: Experiment to get trace for :param signals: List of load tracking signals to extract. Probably a subset of ``['util_avg', 'load_avg']`` :returns: :class:`pandas.DataFrame` with a column for each signal for the experiment's workload task """ [task] = experiment.wload.tasks.keys() trace = self.get_trace(experiment) # There are two different scheduler trace events that expose the load # tracking signals. Neither of them is in mainline. Eventually they # should be unified but for now we'll just check for both types of # event. # TODO: Add support for this parsing in Trappy and/or tasks_analysis signal_fields = signals if 'sched_load_avg_task' in trace.available_events: event = 'sched_load_avg_task' elif 'sched_load_se' in trace.available_events: event = 'sched_load_se' # sched_load_se uses 'util' and 'load' instead of 'util_avg' and # 'load_avg' signal_fields = [s.replace('_avg', '') for s in signals] elif 'sched_pelt_se' in trace.available_events: event = 'sched_pelt_se' else: raise ValueError('No sched_load_avg_task or sched_pelt_se events. ' 'Does the kernel support them?') df = getattr(trace.ftrace, event).data_frame df = df[df['comm'] == task][signal_fields] df = select_window(df, self.get_window(experiment)) return df.rename(columns=dict(zip(signal_fields, signals))) def get_signal_mean(self, experiment, signal, ignore_first_s=UTIL_AVG_CONVERGENCE_TIME): """ Get the mean of a scheduler signal for the experiment's task Ignore the first `ignore_first_s` seconds of the signal. """ (wload_start, wload_end) = self.get_window(experiment) window = (wload_start + ignore_first_s, wload_end) signal = self.get_sched_task_signals(experiment, [signal])[signal] signal = select_window(signal, window) return area_under_curve(signal) / (window[1] - window[0]) class FreqInvarianceTest(_LoadTrackingBase): """ Goal ==== Basic check for frequency invariant load tracking Detailed Description ==================== This test runs the same workload on the most capable CPU on the system at a cross section of available frequencies. The trace is then examined to find the average activation length of the workload, which is combined with the known period to estimate an expected mean value for util_avg for each frequency. The util_avg value is extracted from scheduler trace events and its mean is compared with the expected value (ignoring the first 300ms so that the signal can stabilize). The test fails if the observed mean is beyond a certain error margin from the expected one. load_avg is then similarly compared with the expected util_avg mean, under the assumption that load_avg should equal util_avg when system load is light. Expected Behaviour ================== Load tracking signals are scaled so that the workload results in roughly the same util & load values regardless of frequency. """ @classmethod def _getExperimentsConf(cls, test_env): # Run on one of the CPUs with highest capacity cpu = max(range(test_env.target.number_of_cpus), key=lambda c: cls._get_cpu_capacity(test_env, c)) wloads = { 'fie_10pct' : cls.get_wload(cpu) } # Create a set of confs with different frequencies # We'll run the 10% workload under each conf (i.e. 
at each frequency) confs = [] all_freqs = test_env.target.cpufreq.list_frequencies(cpu) # If we have loads of frequencies just test a cross-section so it # doesn't take all day cls.freqs = all_freqs[::len(all_freqs)/8 + 1] for freq in cls.freqs: confs.append({ 'tag' : 'freq_{}'.format(freq), 'flags' : ['ftrace', 'freeze_userspace'], 'cpufreq' : { 'freqs' : {cpu: freq}, 'governor' : 'userspace', }, }) return { 'wloads': wloads, 'confs': confs, } def _test_signal(self, experiment, tasks, signal_name): [task] = tasks exp_util = self.get_expected_util_avg(experiment) signal_mean = self.get_signal_mean(experiment, signal_name) error_margin = exp_util * (ERROR_MARGIN_PCT / 100.) [freq] = experiment.conf['cpufreq']['freqs'].values() msg = 'Saw {} around {}, expected {} at freq {}'.format( signal_name, signal_mean, exp_util, freq) self.assertAlmostEqual(signal_mean, exp_util, delta=error_margin, msg=msg) @experiment_test def test_task_util_avg(self, experiment, tasks): """ Test that the mean of the util_avg signal matched the expected value """ return self._test_signal(experiment, tasks, 'util_avg') @experiment_test def test_task_load_avg(self, experiment, tasks): """ Test that the mean of the load_avg signal matched the expected value Assuming that the system was under little stress (so the task was RUNNING whenever it was RUNNABLE) and that the task was run with a 'nice' value of 0, the load_avg should be similar to the util_avg. So, this test does the same as test_task_util_avg but for load_avg. """ return self._test_signal(experiment, tasks, 'load_avg') class CpuInvarianceTest(_LoadTrackingBase): """ Goal ==== Basic check for CPU invariant load and utilization tracking Detailed Description ==================== This test runs the same workload on one CPU of each type in the system. The trace is then examined to estimate an expected mean value for util_avg for each CPU's workload. The util_avg value is extracted from scheduler trace events and its mean is compared with the expected value (ignoring the first 300ms so that the signal can stabilize). The test fails if the observed mean is beyond a certain error margin from the expected one. load_avg is then similarly compared with the expected util_avg mean, under the assumption that load_avg should equal util_avg when system load is light. Expected Behaviour ================== Load tracking signals are scaled so that the workload results in roughly the same util & load values regardless of compute power of the CPU used. Moreover, assuming that the extraneous system load is negligible, the load signal is similar to the utilization signal. """ @classmethod def _getExperimentsConf(cls, test_env): # Run the 10% workload on one CPU in each capacity group wloads = {} tested_caps = set() for cpu in range(test_env.target.number_of_cpus): cap = cls._get_cpu_capacity(test_env, cpu) if cap in tested_caps: # No need to test on every CPU, just one for each capacity value continue tested_caps.add(cap) wloads['cie_cpu{}'.format(cpu)] = cls.get_wload(cpu) conf = { 'tag' : 'cie_conf', 'flags' : ['ftrace', 'freeze_userspace'], 'cpufreq' : {'governor' : 'performance'}, } return { 'wloads': wloads, 'confs': [conf], } def _test_signal(self, experiment, tasks, signal_name): [task] = tasks exp_util = self.get_expected_util_avg(experiment) signal_mean = self.get_signal_mean(experiment, signal_name) error_margin = exp_util * (ERROR_MARGIN_PCT / 100.) 
[cpu] = experiment.wload.cpus msg = 'Saw {} around {}, expected {} on cpu {}'.format( signal_name, signal_mean, exp_util, cpu) self.assertAlmostEqual(signal_mean, exp_util, delta=error_margin, msg=msg) @experiment_test def test_task_util_avg(self, experiment, tasks): """ Test that the mean of the util_avg signal matched the expected value """ return self._test_signal(experiment, tasks, 'util_avg')
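# A back-of-the-envelope sketch (illustrative numbers only, not taken from a real
# trace) of the expected util_avg computed in get_expected_util_avg() above:
# UTIL_SCALE is scaled by the duty cycle and by the CPU capacity, which is itself
# scaled linearly by frequency when the userspace governor pins the frequency.
#
#   duty_cycle_pct = 10.0                       # the rt-app workload above runs at 10%
#   cpu_capacity = 1024.0 * (800e6 / 1.6e9)     # assumed: capacity-1024 CPU at half its max freq
#   scale = 1024.0                              # assumed: largest CPU capacity in the system
#   expected_util = UTIL_SCALE * (duty_cycle_pct / 100.0) * (cpu_capacity / scale)
#   # -> 1024 * 0.1 * 0.5 = 51.2, compared against the traced mean within ERROR_MARGIN_PCT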
apache-2.0
rosswhitfield/mantid
qt/python/mantidqt/widgets/workspacedisplay/matrix/io.py
3
1554
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + # This file is part of the mantid workbench. # from mantidqt.widgets.workspacedisplay.matrix.presenter import MatrixWorkspaceDisplay from mantid.api import AnalysisDataService as ADS # noqa class MatrixWorkspaceDisplayAttributes(object): # WARNING: If you delete a tag from here instead of adding a new one, it will make old project files obsolete so # just add an extra tag to the list e.g. ["InstrumentWidget", "IWidget"] _tags = ["MatrixWorkspaceDisplayView"] class MatrixWorkspaceDisplayEncoder(MatrixWorkspaceDisplayAttributes): def __init__(self): super(MatrixWorkspaceDisplayEncoder, self).__init__() @staticmethod def encode(obj, _=None): return {"workspace": obj.presenter.model._ws.name()} @classmethod def tags(cls): return cls._tags class MatrixWorkspaceDisplayDecoder(MatrixWorkspaceDisplayAttributes): def __init__(self): super(MatrixWorkspaceDisplayDecoder, self).__init__() @staticmethod def decode(obj_dic, _=None): import matplotlib.pyplot as plt pres = MatrixWorkspaceDisplay(ADS.retrieve(obj_dic["workspace"]), plot=plt) return pres.container @classmethod def tags(cls): return cls._tags
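# A minimal usage sketch (not part of the original file; assumes a workspace named
# "ws" exists in the AnalysisDataService and a Qt event loop is running): the
# decoder only needs the dictionary produced by the encoder.
#
#   widget = MatrixWorkspaceDisplayDecoder().decode({"workspace": "ws"})
#   MatrixWorkspaceDisplayDecoder.tags()   # ["MatrixWorkspaceDisplayView"]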
gpl-3.0
raazesh-sainudiin/scalable-data-science
db/2/2/056_DLbyABr_04a-Hands-On-MNIST-MLP.py
2
7018
# Databricks notebook source # MAGIC %md # MAGIC # MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/) # MAGIC # MAGIC This is used in a non-profit educational setting with kind permission of [Adam Breindel](https://www.linkedin.com/in/adbreind). # MAGIC This is not licensed by Adam for use in a for-profit setting. Please contact Adam directly at `[email protected]` to request or report such use cases or abuses. # MAGIC A few minor modifications and additional mathematical statistical pointers have been added by Raazesh Sainudiin when teaching PhD students in Uppsala University. # COMMAND ---------- # MAGIC %md # MAGIC #### As we dive into more hands-on works, let's recap some basic guidelines: # MAGIC # MAGIC 0. Structure of your network is the first thing to work with, before worrying about the precise number of neurons, size of convolution filters etc. # MAGIC # MAGIC 1. "Business records" or fairly (ideally?) uncorrelated predictors -- use Dense Perceptron Layer(s) # MAGIC # MAGIC 2. Data that has 2-D patterns: 2D Convolution layer(s) # MAGIC # MAGIC 3. For activation of hidden layers, when in doubt, use ReLU # MAGIC # MAGIC 4. Output: # MAGIC * Regression: 1 neuron with linear activation # MAGIC * For k-way classification: k neurons with softmax activation # MAGIC # MAGIC 5. Deeper networks are "smarter" than wider networks (in terms of abstraction) # MAGIC # MAGIC 6. More neurons & layers \\( \to \\) more capacity \\( \to \\) more data \\( \to \\) more regularization (to prevent overfitting) # MAGIC # MAGIC 7. If you don't have any specific reason not to use the "adam" optimizer, use that one # MAGIC # MAGIC 8. Errors: # MAGIC * For regression or "wide" content matching (e.g., large image similarity), use mean-square-error; # MAGIC * For classification or narrow content matching, use cross-entropy # MAGIC # MAGIC 9. As you simplify and abstract from your raw data, you should need less features/parameters, so your layers probably become smaller and simpler. # COMMAND ---------- # MAGIC %md # MAGIC As a baseline, let's start a lab running with what we already know. # MAGIC # MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits. 
# MAGIC # MAGIC The main part of the code looks like the following (full code you can run is in the next cell): # MAGIC # MAGIC ``` # MAGIC # imports, setup, load data sets # MAGIC # MAGIC model = Sequential() # MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu')) # MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu')) # MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax')) # MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy']) # MAGIC # MAGIC categorical_labels = to_categorical(y_train, num_classes=10) # MAGIC # MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100) # MAGIC # MAGIC # print metrics, plot errors # MAGIC ``` # MAGIC # MAGIC Note the changes, which are largely about building a classifier instead of a regression model: # MAGIC # MAGIC * Output layer has one neuron per category, with softmax activation # MAGIC * __Loss function is cross-entropy loss__ # MAGIC * Accuracy metric is categorical accuracy # COMMAND ---------- from keras.models import Sequential from keras.layers import Dense from keras.utils import to_categorical import sklearn.datasets import datetime import matplotlib.pyplot as plt import numpy as np train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt" test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt" X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784) X_train = X_train.toarray() X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784) X_test = X_test.toarray() model = Sequential() model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu')) model.add(Dense(15, kernel_initializer='normal', activation='relu')) model.add(Dense(10, kernel_initializer='normal', activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy']) categorical_labels = to_categorical(y_train, num_classes=10) start = datetime.datetime.today() history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2) scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10)) print for i in range(len(model.metrics_names)): print("%s: %f" % (model.metrics_names[i], scores[i])) print ("Start: " + str(start)) end = datetime.datetime.today() print ("End: " + str(end)) print ("Elapse: " + str(end-start)) # COMMAND ---------- import matplotlib.pyplot as plt fig, ax = plt.subplots() fig.set_size_inches((5,5)) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') display(fig) # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC What are the big takeaways from this experiment? # MAGIC # MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20 # MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit. # MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse! # MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste. # MAGIC 5. For what it's worth, we get 95% accuracy without much work. 
# MAGIC # MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better. # MAGIC # MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously. # MAGIC # MAGIC ## You Try Now! # MAGIC # MAGIC Try two more experiments (try them separately): # MAGIC # MAGIC 1. Add a third, hidden layer. # MAGIC 2. Increase the size of the hidden layers. # MAGIC # MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy. # MAGIC # MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
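# COMMAND ----------

# MAGIC %md
# MAGIC A minimal sketch (not part of the original notebook) of the first suggested experiment:
# MAGIC the same MLP with a third hidden layer, reusing `X_train` and `categorical_labels` from the cells above.

# COMMAND ----------

model3 = Sequential()
model3.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model3.add(Dense(15, kernel_initializer='normal', activation='relu'))
model3.add(Dense(15, kernel_initializer='normal', activation='relu'))  # extra hidden layer
model3.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model3.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
history3 = model3.fit(X_train, categorical_labels, epochs=40, batch_size=100,
                      validation_split=0.1, verbose=2)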
unlicense
pizzathief/scipy
scipy/stats/_discrete_distns.py
2
30692
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from functools import partial from scipy import special from scipy.special import entr, logsumexp, betaln, gammaln as gamln from scipy._lib._util import _lazywhere, rng_integers from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh import numpy as np from ._distn_infrastructure import ( rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names) class binom_gen(rv_discrete): r"""A binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `binom` is: .. math:: f(k) = \binom{n}{k} p^k (1-p)^{n-k} for ``k`` in ``{0, 1,..., n}``. `binom` takes ``n`` and ``p`` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, n, p, size=None, random_state=None): return random_state.binomial(n, p, size) def _argcheck(self, n, p): return (n >= 0) & (p >= 0) & (p <= 1) def _get_support(self, n, p): return self.a, n def _logpmf(self, x, n, p): k = floor(x) combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) def _pmf(self, x, n, p): # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) return exp(self._logpmf(x, n, p)) def _cdf(self, x, n, p): k = floor(x) vals = special.bdtr(k, n, p) return vals def _sf(self, x, n, p): k = floor(x) return special.bdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.bdtrik(q, n, p)) vals1 = np.maximum(vals - 1, 0) temp = special.bdtr(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p, moments='mv'): q = 1.0 - p mu = n * p var = n * p * q g1, g2 = None, None if 's' in moments: g1 = (q - p) / sqrt(var) if 'k' in moments: g2 = (1.0 - 6*p*q) / var return mu, var, g1, g2 def _entropy(self, n, p): k = np.r_[0:n + 1] vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0) binom = binom_gen(name='binom') class bernoulli_gen(binom_gen): r"""A Bernoulli discrete random variable. %(before_notes)s Notes ----- The probability mass function for `bernoulli` is: .. math:: f(k) = \begin{cases}1-p &\text{if } k = 0\\ p &\text{if } k = 1\end{cases} for :math:`k` in :math:`\{0, 1\}`. `bernoulli` takes :math:`p` as shape parameter. %(after_notes)s %(example)s """ def _rvs(self, p, size=None, random_state=None): return binom_gen._rvs(self, 1, p, size=size, random_state=random_state) def _argcheck(self, p): return (p >= 0) & (p <= 1) def _get_support(self, p): # Overrides binom_gen._get_support!x return self.a, self.b def _logpmf(self, x, p): return binom._logpmf(x, 1, p) def _pmf(self, x, p): # bernoulli.pmf(k) = 1-p if k = 0 # = p if k = 1 return binom._pmf(x, 1, p) def _cdf(self, x, p): return binom._cdf(x, 1, p) def _sf(self, x, p): return binom._sf(x, 1, p) def _ppf(self, q, p): return binom._ppf(q, 1, p) def _stats(self, p): return binom._stats(1, p) def _entropy(self, p): return entr(p) + entr(1-p) bernoulli = bernoulli_gen(b=1, name='bernoulli') class betabinom_gen(rv_discrete): r"""A beta-binomial discrete random variable. %(before_notes)s Notes ----- The beta-binomial distribution is a binomial distribution with a probability of success `p` that follows a beta distribution. The probability mass function for `betabinom` is: .. math:: f(k) = \binom{n}{k} \frac{B(k + a, n - k + b)}{B(a, b)} for ``k`` in ``{0, 1,..., n}``, :math:`n \geq 0`, :math:`a > 0`, :math:`b > 0`, where :math:`B(a, b)` is the beta function. `betabinom` takes :math:`n`, :math:`a`, and :math:`b` as shape parameters. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Beta-binomial_distribution %(after_notes)s .. versionadded:: 1.4.0 See Also -------- beta, binom %(example)s """ def _rvs(self, n, a, b, size=None, random_state=None): p = random_state.beta(a, b, size) return random_state.binomial(n, p, size) def _get_support(self, n, a, b): return 0, n def _argcheck(self, n, a, b): return (n >= 0) & (a > 0) & (b > 0) def _logpmf(self, x, n, a, b): k = floor(x) combiln = -log(n + 1) - betaln(n - k + 1, k + 1) return combiln + betaln(k + a, n - k + b) - betaln(a, b) def _pmf(self, x, n, a, b): return exp(self._logpmf(x, n, a, b)) def _stats(self, n, a, b, moments='mv'): e_p = a / (a + b) e_q = 1 - e_p mu = n * e_p var = n * (a + b + n) * e_p * e_q / (a + b + 1) g1, g2 = None, None if 's' in moments: g1 = 1.0 / sqrt(var) g1 *= (a + b + 2 * n) * (b - a) g1 /= (a + b + 2) * (a + b) if 'k' in moments: g2 = a + b g2 *= (a + b - 1 + 6 * n) g2 += 3 * a * b * (n - 2) g2 += 6 * n ** 2 g2 -= 3 * e_p * b * n * (6 - n) g2 -= 18 * e_p * e_q * n ** 2 g2 *= (a + b) ** 2 * (1 + a + b) g2 /= (n * a * b * (a + b + 2) * (a + b + 3) * (a + b + n)) g2 -= 3 return mu, var, g1, g2 betabinom = betabinom_gen(name='betabinom') class nbinom_gen(rv_discrete): r"""A negative binomial discrete random variable. %(before_notes)s Notes ----- Negative binomial distribution describes a sequence of i.i.d. Bernoulli trials, repeated until a predefined, non-random number of successes occurs. The probability mass function of the number of failures for `nbinom` is: .. math:: f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k for :math:`k \ge 0`. `nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the number of successes, whereas p is the probability of a single success. %(after_notes)s %(example)s """ def _rvs(self, n, p, size=None, random_state=None): return random_state.negative_binomial(n, p, size) def _argcheck(self, n, p): return (n > 0) & (p >= 0) & (p <= 1) def _pmf(self, x, n, p): # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k return exp(self._logpmf(x, n, p)) def _logpmf(self, x, n, p): coeff = gamln(n+x) - gamln(x+1) - gamln(n) return coeff + n*log(p) + special.xlog1py(x, -p) def _cdf(self, x, n, p): k = floor(x) return special.betainc(n, k+1, p) def _sf_skip(self, x, n, p): # skip because special.nbdtrc doesn't work for 0<n<1 k = floor(x) return special.nbdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.nbdtrik(q, n, p)) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p): Q = 1.0 / p P = Q - 1.0 mu = n*P var = n*P*Q g1 = (Q+P)/sqrt(n*P*Q) g2 = (1.0 + 6*P*Q) / (n*P*Q) return mu, var, g1, g2 nbinom = nbinom_gen(name='nbinom') class geom_gen(rv_discrete): r"""A geometric discrete random variable. %(before_notes)s Notes ----- The probability mass function for `geom` is: .. math:: f(k) = (1-p)^{k-1} p for :math:`k \ge 1`. `geom` takes :math:`p` as shape parameter. 
%(after_notes)s See Also -------- planck %(example)s """ def _rvs(self, p, size=None, random_state=None): return random_state.geometric(p, size=size) def _argcheck(self, p): return (p <= 1) & (p >= 0) def _pmf(self, k, p): return np.power(1-p, k-1) * p def _logpmf(self, k, p): return special.xlog1py(k - 1, -p) + log(p) def _cdf(self, x, p): k = floor(x) return -expm1(log1p(-p)*k) def _sf(self, x, p): return np.exp(self._logsf(x, p)) def _logsf(self, x, p): k = floor(x) return k*log1p(-p) def _ppf(self, q, p): vals = ceil(log1p(-q) / log1p(-p)) temp = self._cdf(vals-1, p) return np.where((temp >= q) & (vals > 0), vals-1, vals) def _stats(self, p): mu = 1.0/p qr = 1.0-p var = qr / p / p g1 = (2.0-p) / sqrt(qr) g2 = np.polyval([1, -6, 6], p)/(1.0-p) return mu, var, g1, g2 geom = geom_gen(a=1, name='geom', longname="A geometric") class hypergeom_gen(rv_discrete): r"""A hypergeometric discrete random variable. The hypergeometric distribution models drawing objects from a bin. `M` is the total number of objects, `n` is total number of Type I objects. The random variate represents the number of Type I objects in `N` drawn without replacement from the total population. %(before_notes)s Notes ----- The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not universally accepted. See the Examples for a clarification of the definitions used here. The probability mass function is defined as, .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}} {\binom{M}{N}} for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial coefficients are defined as, .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. %(after_notes)s Examples -------- >>> from scipy.stats import hypergeom >>> import matplotlib.pyplot as plt Suppose we have a collection of 20 animals, of which 7 are dogs. Then if we want to know the probability of finding a given number of dogs if we choose at random 12 of the 20 animals, we can initialize a frozen distribution and plot the probability mass function: >>> [M, n, N] = [20, 7, 12] >>> rv = hypergeom(M, n, N) >>> x = np.arange(0, n+1) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group of chosen animals') >>> ax.set_ylabel('hypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `hypergeom` methods directly. 
To for example obtain the cumulative distribution function, use: >>> prb = hypergeom.cdf(x, M, n, N) And to generate random numbers: >>> R = hypergeom.rvs(M, n, N, size=10) """ def _rvs(self, M, n, N, size=None, random_state=None): return random_state.hypergeometric(n, M-n, N, size=size) def _get_support(self, M, n, N): return np.maximum(N-(M-n), 0), np.minimum(n, N) def _argcheck(self, M, n, N): cond = (M > 0) & (n >= 0) & (N >= 0) cond &= (n <= M) & (N <= M) return cond def _logpmf(self, k, M, n, N): tot, good = M, n bad = tot - good result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) - betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) - betaln(tot+1, 1)) return result def _pmf(self, k, M, n, N): # same as the following but numerically more precise # return comb(good, k) * comb(bad, N-k) / comb(tot, N) return exp(self._logpmf(k, M, n, N)) def _stats(self, M, n, N): # tot, good, sample_size = M, n, N # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n') M, n, N = 1.*M, 1.*n, 1.*N m = M - n p = n/M mu = N*p var = m*n*N*(M - N)*1.0/(M*M*(M-1)) g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m g2 *= (M-1)*M*M g2 += 6.*n*N*(M-N)*m*(5.*M-6) g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) return mu, var, g1, g2 def _entropy(self, M, n, N): k = np.r_[N - (M - n):min(n, N) + 1] vals = self.pmf(k, M, n, N) return np.sum(entr(vals), axis=0) def _sf(self, k, M, n, N): # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual # integration. res = [] for quant, tot, good, draw in zip(k, M, n, N): # Manual integration over probability mass function. More accurate # than integrate.quad. k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) def _logsf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(k, M, n, N): if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-cdf) res.append(log1p(-exp(self.logcdf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(quant + 1, draw + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) def _logcdf(self, k, M, n, N): res = [] for quant, tot, good, draw in zip(k, M, n, N): if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5): # Less terms to sum if we calculate log(1-sf) res.append(log1p(-exp(self.logsf(quant, tot, good, draw)))) else: # Integration over probability mass function using logsumexp k2 = np.arange(0, quant + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom') # FIXME: Fails _cdfvec class logser_gen(rv_discrete): r"""A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is: .. math:: f(k) = - \frac{p^k}{k \log(1-p)} for :math:`k \ge 1`. `logser` takes :math:`p` as shape parameter. 
%(after_notes)s %(example)s """ def _rvs(self, p, size=None, random_state=None): # looks wrong for p>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return random_state.logseries(p, size=size) def _argcheck(self, p): return (p > 0) & (p < 1) def _pmf(self, k, p): # logser.pmf(k) = - p**k / (k*log(1-p)) return -np.power(p, k) * 1.0 / k / special.log1p(-p) def _stats(self, p): r = special.log1p(-p) mu = p / (p - 1.0) / r mu2p = -p / r / (p - 1.0)**2 var = mu2p - mu*mu mu3p = -p / r * (1.0+p) / (1.0 - p)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / np.power(var, 1.5) mu4p = -p / r * ( 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1, name='logser', longname='A logarithmic') class poisson_gen(rv_discrete): r"""A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is: .. math:: f(k) = \exp(-\mu) \frac{\mu^k}{k!} for :math:`k \ge 0`. `poisson` takes :math:`\mu` as shape parameter. %(after_notes)s %(example)s """ # Override rv_discrete._argcheck to allow mu=0. def _argcheck(self, mu): return mu >= 0 def _rvs(self, mu, size=None, random_state=None): return random_state.poisson(mu, size) def _logpmf(self, k, mu): Pk = special.xlogy(k, mu) - gamln(k + 1) - mu return Pk def _pmf(self, k, mu): # poisson.pmf(k) = exp(-mu) * mu**k / k! return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k, mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k, mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q, mu)) vals1 = np.maximum(vals - 1, 0) temp = special.pdtr(vals1, mu) return np.where(temp >= q, vals1, vals) def _stats(self, mu): var = mu tmp = np.asarray(mu) mu_nonzero = tmp > 0 g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson') class planck_gen(rv_discrete): r"""A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) for :math:`k \ge 0` and :math:`\lambda > 0`. `planck` takes :math:`\lambda` as shape parameter. The Planck distribution can be written as a geometric distribution (`geom`) with :math:`p = 1 - \exp(-\lambda)` shifted by `loc = -1`. 
%(after_notes)s See Also -------- geom %(example)s """ def _argcheck(self, lambda_): return lambda_ > 0 def _pmf(self, k, lambda_): return -expm1(-lambda_)*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return -expm1(-lambda_*(k+1)) def _sf(self, x, lambda_): return exp(self._logsf(x, lambda_)) def _logsf(self, x, lambda_): k = floor(x) return -lambda_*(k+1) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(*(self._get_support(lambda_))) temp = self._cdf(vals1, lambda_) return np.where(temp >= q, vals1, vals) def _rvs(self, lambda_, size=None, random_state=None): # use relation to geometric distribution for sampling p = -expm1(-lambda_) return random_state.geometric(p, size=size) - 1.0 def _stats(self, lambda_): mu = 1/expm1(lambda_) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): C = -expm1(-lambda_) return lambda_*exp(-lambda_)/C - log(C) planck = planck_gen(a=0, name='planck', longname='A discrete exponential ') class boltzmann_gen(rv_discrete): r"""A Boltzmann (Truncated Discrete Exponential) random variable. %(before_notes)s Notes ----- The probability mass function for `boltzmann` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) / (1-\exp(-\lambda N)) for :math:`k = 0,..., N-1`. `boltzmann` takes :math:`\lambda > 0` and :math:`N > 0` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, lambda_, N): return (lambda_ > 0) & (N > 0) def _get_support(self, lambda_, N): return self.a, N - 1 def _pmf(self, k, lambda_, N): # boltzmann.pmf(k) = # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann', a=0, longname='A truncated discrete exponential ') class randint_gen(rv_discrete): r"""A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is: .. math:: f(k) = \frac{1}{high - low} for ``k = low, ..., high - 1``. `randint` takes ``low`` and ``high`` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, low, high): return (high > low) def _get_support(self, low, high): return low, high-1 def _pmf(self, k, low, high): # randint.pmf(k) = 1./(high - low) p = np.ones_like(k) / (high - low) return np.where((k >= low) & (k < high), p, 0.) def _cdf(self, x, low, high): k = floor(x) return (k - low + 1.) 
/ (high - low) def _ppf(self, q, low, high): vals = ceil(q * (high - low) + low) - 1 vals1 = (vals - 1).clip(low, high) temp = self._cdf(vals1, low, high) return np.where(temp >= q, vals1, vals) def _stats(self, low, high): m2, m1 = np.asarray(high), np.asarray(low) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d*d - 1) / 12.0 g1 = 0.0 g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) return mu, var, g1, g2 def _rvs(self, low, high, size=None, random_state=None): """An array of *size* random integers >= ``low`` and < ``high``.""" if np.asarray(low).size == 1 and np.asarray(high).size == 1: # no need to vectorize in that case return rng_integers(random_state, low, high, size=size) if size is not None: # NumPy's RandomState.randint() doesn't broadcast its arguments. # Use `broadcast_to()` to extend the shapes of low and high # up to size. Then we can use the numpy.vectorize'd # randint without needing to pass it a `size` argument. low = np.broadcast_to(low, size) high = np.broadcast_to(high, size) randint = np.vectorize(partial(rng_integers, random_state), otypes=[np.int_]) return randint(low, high) def _entropy(self, low, high): return log(high - low) randint = randint_gen(name='randint', longname='A discrete uniform ' '(random integer)') # FIXME: problems sampling. class zipf_gen(rv_discrete): r"""A Zipf discrete random variable. %(before_notes)s Notes ----- The probability mass function for `zipf` is: .. math:: f(k, a) = \frac{1}{\zeta(a) k^a} for :math:`k \ge 1`. `zipf` takes :math:`a` as shape parameter. :math:`\zeta` is the Riemann zeta function (`scipy.special.zeta`) %(after_notes)s %(example)s """ def _rvs(self, a, size=None, random_state=None): return random_state.zipf(a, size=size) def _argcheck(self, a): return a > 1 def _pmf(self, k, a): # zipf.pmf(k, a) = 1/(zeta(a) * k**a) Pk = 1.0 / special.zeta(a, 1) / k**a return Pk def _munp(self, n, a): return _lazywhere( a > n + 1, (a, n), lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), np.inf) zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') class dlaplace_gen(rv_discrete): r"""A Laplacian discrete random variable. %(before_notes)s Notes ----- The probability mass function for `dlaplace` is: .. math:: f(k) = \tanh(a/2) \exp(-a |k|) for integers :math:`k` and :math:`a > 0`. `dlaplace` takes :math:`a` as shape parameter. %(after_notes)s %(example)s """ def _pmf(self, k, a): # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) return tanh(a/2.0) * exp(-a * abs(k)) def _cdf(self, x, a): k = floor(x) f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1) f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1) return _lazywhere(k >= 0, (k, a), f=f, f2=f2) def _ppf(self, q, a): const = 1 + exp(a) vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1, -log((1-q) * const) / a)) vals1 = vals - 1 return np.where(self._cdf(vals1, a) >= q, vals1, vals) def _stats(self, a): ea = exp(a) mu2 = 2.*ea/(ea-1.)**2 mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 return 0., mu2, 0., mu4/mu2**2 - 3. def _entropy(self, a): return a / sinh(a) - log(tanh(a/2.0)) def _rvs(self, a, size=None, random_state=None): # The discrete Laplace is equivalent to the two-sided geometric # distribution with PMF: # f(k) = (1 - alpha)/(1 + alpha) * alpha^abs(k) # Reference: # https://www.sciencedirect.com/science/ # article/abs/pii/S0378375804003519 # Furthermore, the two-sided geometric distribution is # equivalent to the difference between two iid geometric # distributions. 
# Reference (page 179): # https://pdfs.semanticscholar.org/61b3/ # b99f466815808fd0d03f5d2791eea8b541a1.pdf # Thus, we can leverage the following: # 1) alpha = e^-a # 2) probability_of_success = 1 - alpha (Bernoulli trial) probOfSuccess = -np.expm1(-np.asarray(a)) x = random_state.geometric(probOfSuccess, size=size) y = random_state.geometric(probOfSuccess, size=size) return x - y dlaplace = dlaplace_gen(a=-np.inf, name='dlaplace', longname='A discrete Laplacian') class skellam_gen(rv_discrete): r"""A Skellam discrete random variable. %(before_notes)s Notes ----- Probability distribution of the difference of two correlated or uncorrelated Poisson random variables. Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with expected values :math:`\lambda_1` and :math:`\lambda_2`. Then, :math:`k_1 - k_2` follows a Skellam distribution with parameters :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where :math:`\rho` is the correlation coefficient between :math:`k_1` and :math:`k_2`. If the two Poisson-distributed r.v. are independent then :math:`\rho = 0`. Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive. For details see: https://en.wikipedia.org/wiki/Skellam_distribution `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, mu1, mu2, size=None, random_state=None): n = size return (random_state.poisson(mu1, n) - random_state.poisson(mu2, n)) def _pmf(self, x, mu1, mu2): px = np.where(x < 0, _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) # ncx2.pdf() returns nan's for extremely low probabilities return px def _cdf(self, x, mu1, mu2): x = floor(x) px = np.where(x < 0, _ncx2_cdf(2*mu2, -2*x, 2*mu1), 1 - _ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) return px def _stats(self, mu1, mu2): mean = mu1 - mu2 var = mu1 + mu2 g1 = mean / sqrt((var)**3) g2 = 1 / var return mean, var, g1, g2 skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') class yulesimon_gen(rv_discrete): r"""A Yule-Simon discrete random variable. %(before_notes)s Notes ----- The probability mass function for the `yulesimon` is: .. math:: f(k) = \alpha B(k, \alpha+1) for :math:`k=1,2,3,...`, where :math:`\alpha>0`. Here :math:`B` refers to the `scipy.special.beta` function. The sampling of random variates is based on pg 553, Section 6.3 of [1]_. Our notation maps to the referenced logic via :math:`\alpha=a-1`. For details see the wikipedia entry [2]_. References ---------- .. [1] Devroye, Luc. "Non-uniform Random Variate Generation", (1986) Springer, New York. .. 
[2] https://en.wikipedia.org/wiki/Yule-Simon_distribution %(after_notes)s %(example)s """ def _rvs(self, alpha, size=None, random_state=None): E1 = random_state.standard_exponential(size) E2 = random_state.standard_exponential(size) ans = ceil(-E1 / log1p(-exp(-E2 / alpha))) return ans def _pmf(self, x, alpha): return alpha * special.beta(x, alpha + 1) def _argcheck(self, alpha): return (alpha > 0) def _logpmf(self, x, alpha): return log(alpha) + special.betaln(x, alpha + 1) def _cdf(self, x, alpha): return 1 - x * special.beta(x, alpha + 1) def _sf(self, x, alpha): return x * special.beta(x, alpha + 1) def _logsf(self, x, alpha): return log(x) + special.betaln(x, alpha + 1) def _stats(self, alpha): mu = np.where(alpha <= 1, np.inf, alpha / (alpha - 1)) mu2 = np.where(alpha > 2, alpha**2 / ((alpha - 2.0) * (alpha - 1)**2), np.inf) mu2 = np.where(alpha <= 1, np.nan, mu2) g1 = np.where(alpha > 3, sqrt(alpha - 2) * (alpha + 1)**2 / (alpha * (alpha - 3)), np.inf) g1 = np.where(alpha <= 2, np.nan, g1) g2 = np.where(alpha > 4, (alpha + 3) + (alpha**3 - 49 * alpha - 22) / (alpha * (alpha - 4) * (alpha - 3)), np.inf) g2 = np.where(alpha <= 2, np.nan, g2) return mu, mu2, g1, g2 yulesimon = yulesimon_gen(name='yulesimon', a=1) # Collect names of classes and objects in this module. pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) __all__ = _distn_names + _distn_gen_names
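# A minimal usage sketch (not part of the original module): sanity-check the
# discrete uniform distribution defined above. On {0, ..., 5} every outcome has
# probability 1/6 and the mean is (low + high - 1) / 2 = 2.5.
#
#   >>> from scipy.stats import randint
#   >>> randint.pmf(2, 0, 6)    # 0.1666...
#   >>> randint.mean(0, 6)      # 2.5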
bsd-3-clause
brenthuisman/phd_tools
plot.1d.rel.py
1
1611
#!/usr/bin/env python
import numpy,sys,matplotlib.pyplot as plt,operator,pickle
#print(plt.style.available)
#plt.style.use('ggplot')
nrprim = 600*28800000
if len(sys.argv) < 3:
    print("Specify at least one input and one output. First input is the one the output is relative to.")
    sys.exit()
filesin = sys.argv[1:-1]
fileout = sys.argv[-1]
data = [] #rows=files,column[0]=filename
for filename in filesin:
    datatmp = [filename]
    if filename.endswith('.txt'):
        header = open(filename,'r')
        for line in header:
            newline = line.strip()
            datatmp.append(float(newline.split()[-1]))
    if filename.endswith('.raw'):
        datatmp.extend(numpy.fromfile(filename, dtype='<f4').tolist())
    if filename.endswith('.pylist'):
        datatmp.extend(pickle.load(open(filename)))
    data.append(datatmp)
datanew = []
for dataset in data[::-1]: #start at the end
    datatmp = []
    for index,val in enumerate(dataset):
        if index == 0:
            datatmp.append(val)
            continue #do not modify filename
        try:
            datatmp.append((data[0][index]-val) / data[0][index])
        except ZeroDivisionError:
            datatmp.append(0)
    datanew.append(datatmp)
#for datindex in range(1,len(data)): #start at the end
#    for valindex in range(datindex):
#        if valindex == 0:
#            continue #do not modify filename
#        try:
#            data[datindex][valindex] = (data[0][valindex]-data[datindex][valindex]) / data[0][valindex]
#        except ZeroDivisionError:
#            val = 0
for dataset in datanew:
    plt.plot(dataset[1:], label=dataset[0],alpha=0.5)
plt.ylabel('Yield')
#plt.ylabel('PG energy')
#plt.legend(loc=4,prop={'size':6})
plt.legend(prop={'size':10})
plt.savefig(fileout)
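# Hypothetical invocation (file names are placeholders): plot every input relative
# to the first one and write the figure to the last argument, e.g.
#   ./plot.1d.rel.py reference.txt variant1.raw variant2.pylist relative.png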
lgpl-3.0
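The script above computes each dataset relative to the first input as `(data[0][i] - val) / data[0][i]`, catching `ZeroDivisionError` element by element. A hedged, vectorized sketch of the same computation; the array values and names are illustrative, not taken from the script's data files.

```
# Vectorized version of the relative-difference step in the script above:
# division by zero maps to 0 instead of raising, mirroring the except branch.
import numpy as np

reference = np.array([10.0, 0.0, 5.0, 2.0])   # plays the role of data[0]
other = np.array([8.0, 3.0, 5.0, 1.0])        # one of the later inputs

relative = np.divide(reference - other, reference,
                     out=np.zeros_like(reference),
                     where=reference != 0)
print(relative)  # [0.2, 0.0, 0.0, 0.5]
```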
Srisai85/scikit-learn
examples/semi_supervised/plot_label_propagation_digits.py
268
2723
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ print(__doc__) # Authors: Clay Woolam <[email protected]> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn import datasets from sklearn.semi_supervised import label_propagation from sklearn.metrics import confusion_matrix, classification_report digits = datasets.load_digits() rng = np.random.RandomState(0) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:330]] y = digits.target[indices[:330]] images = digits.images[indices[:330]] n_total_samples = len(y) n_labeled_points = 30 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 ############################################################################### # Learn with LabelSpreading lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # calculate uncertainty values for each transduced distribution pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] ############################################################################### # plot f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title('predict: %i\ntrue: %i' % ( lp_model.transduction_[image_index], y[image_index])) f.suptitle('Learning with small amount of labeled data') plt.show()
bsd-3-clause
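The example above fits a Label Spreading model with only 30 labeled digits and then ranks the transduced predictions by entropy. A minimal sketch of that uncertainty-ranking step, assuming the newer `sklearn.semi_supervised.LabelSpreading` import path (the file itself uses the older `label_propagation` module) and skipping the shuffling done in the original.

```
# Minimal sketch of the "most uncertain predictions" step: fit LabelSpreading
# with mostly-unlabeled targets (-1) and rank points by the entropy of their
# transduced label distributions.
import numpy as np
from scipy.stats import entropy
from sklearn.datasets import load_digits
from sklearn.semi_supervised import LabelSpreading

X, y = load_digits(return_X_y=True)
X, y = X[:330], y[:330]

y_train = y.copy()
y_train[30:] = -1                      # keep only the first 30 labels

model = LabelSpreading(gamma=0.25, max_iter=20)
model.fit(X, y_train)

pred_entropy = entropy(model.label_distributions_.T)   # one value per sample
most_uncertain = np.argsort(pred_entropy)[-10:]
print(most_uncertain, model.transduction_[most_uncertain])
```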
IntelLabs/hpat
examples/dataframe/rolling/dataframe_rolling_kurt.py
1
1841
# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** import pandas as pd from numba import njit @njit def df_rolling_kurt(): df = pd.DataFrame({'A': [4, 3, 5, 2, 6], 'B': [-4, -3, -5, -2, -6]}) out_df = df.rolling(4).kurt() # Expect DataFrame of # {'A': [NaN, NaN, NaN, -1.2, -3.3], 'B': [NaN, NaN, NaN, -1.2, -3.3]} return out_df print(df_rolling_kurt())
bsd-2-clause
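The jitted example above expects NaN for the first three rows and the window kurtosis afterwards. A plain-pandas sketch of the same call, useful as a reference for what the compiled function should return.

```
# Plain-pandas version of the jitted rolling-kurtosis example above.
import pandas as pd

df = pd.DataFrame({'A': [4, 3, 5, 2, 6], 'B': [-4, -3, -5, -2, -6]})
print(df.rolling(4).kurt())
# The first three rows are NaN because the 4-value window is not yet filled;
# the remaining rows hold the sample excess kurtosis of each window.
```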
Clyde-fare/scikit-learn
sklearn/tests/test_isotonic.py
230
11087
import numpy as np import pickle from sklearn.isotonic import (check_increasing, isotonic_regression, IsotonicRegression) from sklearn.utils.testing import (assert_raises, assert_array_equal, assert_true, assert_false, assert_equal, assert_array_almost_equal, assert_warns_message, assert_no_warnings) from sklearn.utils import shuffle def test_permutation_invariance(): # check that fit is permuation invariant. # regression test of missing sorting of sample-weights ir = IsotonicRegression() x = [1, 2, 3, 4, 5, 6, 7] y = [1, 41, 51, 1, 2, 5, 24] sample_weight = [1, 2, 3, 4, 5, 6, 7] x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0) y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight) y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x) assert_array_equal(y_transformed, y_transformed_s) def test_check_increasing_up(): x = [0, 1, 2, 3, 4, 5] y = [0, 1.5, 2.77, 8.99, 8.99, 50] # Check that we got increasing=True and no warnings is_increasing = assert_no_warnings(check_increasing, x, y) assert_true(is_increasing) def test_check_increasing_up_extreme(): x = [0, 1, 2, 3, 4, 5] y = [0, 1, 2, 3, 4, 5] # Check that we got increasing=True and no warnings is_increasing = assert_no_warnings(check_increasing, x, y) assert_true(is_increasing) def test_check_increasing_down(): x = [0, 1, 2, 3, 4, 5] y = [0, -1.5, -2.77, -8.99, -8.99, -50] # Check that we got increasing=False and no warnings is_increasing = assert_no_warnings(check_increasing, x, y) assert_false(is_increasing) def test_check_increasing_down_extreme(): x = [0, 1, 2, 3, 4, 5] y = [0, -1, -2, -3, -4, -5] # Check that we got increasing=False and no warnings is_increasing = assert_no_warnings(check_increasing, x, y) assert_false(is_increasing) def test_check_ci_warn(): x = [0, 1, 2, 3, 4, 5] y = [0, -1, 2, -3, 4, -5] # Check that we got increasing=False and CI interval warning is_increasing = assert_warns_message(UserWarning, "interval", check_increasing, x, y) assert_false(is_increasing) def test_isotonic_regression(): y = np.array([3, 7, 5, 9, 8, 7, 10]) y_ = np.array([3, 6, 6, 8, 8, 8, 10]) assert_array_equal(y_, isotonic_regression(y)) x = np.arange(len(y)) ir = IsotonicRegression(y_min=0., y_max=1.) ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(ir.transform(x), ir.predict(x)) # check that it is immune to permutation perm = np.random.permutation(len(y)) ir = IsotonicRegression(y_min=0., y_max=1.) 
assert_array_equal(ir.fit_transform(x[perm], y[perm]), ir.fit_transform(x, y)[perm]) assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm]) # check we don't crash when all x are equal: ir = IsotonicRegression() assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y)) def test_isotonic_regression_ties_min(): # Setup examples with ties on minimum x = [0, 1, 1, 2, 3, 4, 5] y = [0, 1, 2, 3, 4, 5, 6] y_true = [0, 1.5, 1.5, 3, 4, 5, 6] # Check that we get identical results for fit/transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(y_true, ir.fit_transform(x, y)) def test_isotonic_regression_ties_max(): # Setup examples with ties on maximum x = [1, 2, 3, 4, 5, 5] y = [1, 2, 3, 4, 5, 6] y_true = [1, 2, 3, 4, 5.5, 5.5] # Check that we get identical results for fit/transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y)) assert_array_equal(y_true, ir.fit_transform(x, y)) def test_isotonic_regression_ties_secondary_(): """ Test isotonic regression fit, transform and fit_transform against the "secondary" ties method and "pituitary" data from R "isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair, Isotone Optimization in R: Pool-Adjacent-Violators Algorithm (PAVA) and Active Set Methods Set values based on pituitary example and the following R command detailed in the paper above: > library("isotone") > data("pituitary") > res1 <- gpava(pituitary$age, pituitary$size, ties="secondary") > res1$x `isotone` version: 1.0-2, 2014-09-07 R version: R version 3.1.1 (2014-07-10) """ x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14] y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25] y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 24.25, 24.25] # Check fit, transform and fit_transform ir = IsotonicRegression() ir.fit(x, y) assert_array_almost_equal(ir.transform(x), y_true, 4) assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4) def test_isotonic_regression_reversed(): y = np.array([10, 9, 10, 7, 6, 6.1, 5]) y_ = IsotonicRegression(increasing=False).fit_transform( np.arange(len(y)), y) assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0)) def test_isotonic_regression_auto_decreasing(): # Set y and x for decreasing y = np.array([10, 9, 10, 7, 6, 6.1, 5]) x = np.arange(len(y)) # Create model and fit_transform ir = IsotonicRegression(increasing='auto') y_ = assert_no_warnings(ir.fit_transform, x, y) # Check that relationship decreases is_increasing = y_[0] < y_[-1] assert_false(is_increasing) def test_isotonic_regression_auto_increasing(): # Set y and x for decreasing y = np.array([5, 6.1, 6, 7, 10, 9, 10]) x = np.arange(len(y)) # Create model and fit_transform ir = IsotonicRegression(increasing='auto') y_ = assert_no_warnings(ir.fit_transform, x, y) # Check that relationship increases is_increasing = y_[0] < y_[-1] assert_true(is_increasing) def test_assert_raises_exceptions(): ir = IsotonicRegression() rng = np.random.RandomState(42) assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6]) assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7]) assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2]) assert_raises(ValueError, ir.transform, rng.randn(3, 10)) def test_isotonic_sample_weight_parameter_default_value(): # check if default value of sample_weight parameter is one ir = IsotonicRegression() # random test 
data rng = np.random.RandomState(42) n = 100 x = np.arange(n) y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n)) # check if value is correctly used weights = np.ones(n) y_set_value = ir.fit_transform(x, y, sample_weight=weights) y_default_value = ir.fit_transform(x, y) assert_array_equal(y_set_value, y_default_value) def test_isotonic_min_max_boundaries(): # check if min value is used correctly ir = IsotonicRegression(y_min=2, y_max=4) n = 6 x = np.arange(n) y = np.arange(n) y_test = [2, 2, 2, 3, 4, 4] y_result = np.round(ir.fit_transform(x, y)) assert_array_equal(y_result, y_test) def test_isotonic_sample_weight(): ir = IsotonicRegression() x = [1, 2, 3, 4, 5, 6, 7] y = [1, 41, 51, 1, 2, 5, 24] sample_weight = [1, 2, 3, 4, 5, 6, 7] expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24] received_y = ir.fit_transform(x, y, sample_weight=sample_weight) assert_array_equal(expected_y, received_y) def test_isotonic_regression_oob_raise(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="raise") ir.fit(x, y) # Check that an exception is thrown assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10]) def test_isotonic_regression_oob_clip(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="clip") ir.fit(x, y) # Predict from training and test x and check that min/max match. y1 = ir.predict([min(x) - 10, max(x) + 10]) y2 = ir.predict(x) assert_equal(max(y1), max(y2)) assert_equal(min(y1), min(y2)) def test_isotonic_regression_oob_nan(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="nan") ir.fit(x, y) # Predict from training and test x and check that we have two NaNs. 
y1 = ir.predict([min(x) - 10, max(x) + 10]) assert_equal(sum(np.isnan(y1)), 2) def test_isotonic_regression_oob_bad(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz") # Make sure that we throw an error for bad out_of_bounds value assert_raises(ValueError, ir.fit, x, y) def test_isotonic_regression_oob_bad_after(): # Set y and x y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="raise") # Make sure that we throw an error for bad out_of_bounds value in transform ir.fit(x, y) ir.out_of_bounds = "xyz" assert_raises(ValueError, ir.transform, x) def test_isotonic_regression_pickle(): y = np.array([3, 7, 5, 9, 8, 7, 10]) x = np.arange(len(y)) # Create model and fit ir = IsotonicRegression(increasing='auto', out_of_bounds="clip") ir.fit(x, y) ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL) ir2 = pickle.loads(ir_ser) np.testing.assert_array_equal(ir.predict(x), ir2.predict(x)) def test_isotonic_duplicate_min_entry(): x = [0, 0, 1] y = [0, 0, 1] ir = IsotonicRegression(increasing=True, out_of_bounds="clip") ir.fit(x, y) all_predictions_finite = np.all(np.isfinite(ir.predict(x))) assert_true(all_predictions_finite) def test_isotonic_zero_weight_loop(): # Test from @ogrisel's issue: # https://github.com/scikit-learn/scikit-learn/issues/4297 # Get deterministic RNG with seed rng = np.random.RandomState(42) # Create regression and samples regression = IsotonicRegression() n_samples = 50 x = np.linspace(-3, 3, n_samples) y = x + rng.uniform(size=n_samples) # Get some random weights and zero out w = rng.uniform(size=n_samples) w[5:8] = 0 regression.fit(x, y, sample_weight=w) # This will hang in failure case. regression.fit(x, y, sample_weight=w)
bsd-3-clause
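Several tests above exercise `IsotonicRegression`'s `out_of_bounds` modes. A minimal usage sketch of the `"clip"` behaviour those tests check, using the same toy data as the tests.

```
# Minimal usage sketch for out_of_bounds="clip": predictions outside the
# training range are clamped to the fitted boundary values instead of
# raising (as with "raise") or returning NaN (as with "nan").
import numpy as np
from sklearn.isotonic import IsotonicRegression

y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))

ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
y_fit = ir.fit_transform(x, y)

print(y_fit)                                      # non-decreasing fit of y
print(ir.predict([x.min() - 10, x.max() + 10]))   # clipped to [y_fit.min(), y_fit.max()]
```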
liberatorqjw/scikit-learn
sklearn/tests/test_naive_bayes.py
16
12584
import pickle from io import BytesIO import numpy as np import scipy.sparse from sklearn.datasets import load_digits from sklearn.cross_validation import cross_val_score from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB # Data is just 6 separable points in the plane X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) y = np.array([1, 1, 1, 2, 2, 2]) # A bit more random tests rng = np.random.RandomState(0) X1 = rng.normal(size=(10, 3)) y1 = (rng.normal(size=(10)) > 0).astype(np.int) # Data is 6 random integer points in a 100 dimensional space classified to # three classes. X2 = rng.randint(5, size=(6, 100)) y2 = np.array([1, 1, 2, 2, 3, 3]) def test_gnb(): """ Gaussian Naive Bayes classification. This checks that GaussianNB implements fit and predict and returns correct values for a simple toy dataset. """ clf = GaussianNB() y_pred = clf.fit(X, y).predict(X) assert_array_equal(y_pred, y) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) def test_gnb_prior(): """Test whether class priors are properly set. """ clf = GaussianNB().fit(X, y) assert_array_almost_equal(np.array([3, 3]) / 6.0, clf.class_prior_, 8) clf.fit(X1, y1) # Check that the class priors sum to 1 assert_array_almost_equal(clf.class_prior_.sum(), 1) def test_discrete_prior(): """Test whether class priors are properly set. """ for cls in [BernoulliNB, MultinomialNB]: clf = cls().fit(X2, y2) assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0), clf.class_log_prior_, 8) def test_mnnb(): """Test Multinomial Naive Bayes classification. This checks that MultinomialNB implements fit and predict and returns correct values for a simple toy dataset. """ for X in [X2, scipy.sparse.csr_matrix(X2)]: # Check the ability to predict the learning set. 
clf = MultinomialNB() assert_raises(ValueError, clf.fit, -X, y2) y_pred = clf.fit(X, y2).predict(X) assert_array_equal(y_pred, y2) # Verify that np.log(clf.predict_proba(X)) gives the same results as # clf.predict_log_proba(X) y_pred_proba = clf.predict_proba(X) y_pred_log_proba = clf.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8) # Check that incremental fitting yields the same results clf2 = MultinomialNB() clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2)) clf2.partial_fit(X[2:5], y2[2:5]) clf2.partial_fit(X[5:], y2[5:]) y_pred2 = clf2.predict(X) assert_array_equal(y_pred2, y2) y_pred_proba2 = clf2.predict_proba(X) y_pred_log_proba2 = clf2.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8) assert_array_almost_equal(y_pred_proba2, y_pred_proba) assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba) # Partial fit on the whole data at once should be the same as fit too clf3 = MultinomialNB() clf3.partial_fit(X, y2, classes=np.unique(y2)) y_pred3 = clf3.predict(X) assert_array_equal(y_pred3, y2) y_pred_proba3 = clf3.predict_proba(X) y_pred_log_proba3 = clf3.predict_log_proba(X) assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8) assert_array_almost_equal(y_pred_proba3, y_pred_proba) assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba) def check_partial_fit(cls): clf1 = cls() clf1.fit([[0, 1], [1, 0]], [0, 1]) clf2 = cls() clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1]) assert_array_equal(clf1.class_count_, clf2.class_count_) assert_array_equal(clf1.feature_count_, clf2.feature_count_) clf3 = cls() clf3.partial_fit([[0, 1]], [0], classes=[0, 1]) clf3.partial_fit([[1, 0]], [1]) assert_array_equal(clf1.class_count_, clf3.class_count_) assert_array_equal(clf1.feature_count_, clf3.feature_count_) def test_discretenb_partial_fit(): for cls in [MultinomialNB, BernoulliNB]: yield check_partial_fit, cls def test_gnb_partial_fit(): clf = GaussianNB().fit(X, y) clf_pf = GaussianNB().partial_fit(X, y, np.unique(y)) assert_array_almost_equal(clf.theta_, clf_pf.theta_) assert_array_almost_equal(clf.sigma_, clf_pf.sigma_) assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_) clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y)) clf_pf2.partial_fit(X[1::2], y[1::2]) assert_array_almost_equal(clf.theta_, clf_pf2.theta_) assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_) assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_) def test_discretenb_pickle(): """Test picklability of discrete naive Bayes classifiers""" for cls in [BernoulliNB, MultinomialNB, GaussianNB]: clf = cls().fit(X2, y2) y_pred = clf.predict(X2) store = BytesIO() pickle.dump(clf, store) clf = pickle.load(BytesIO(store.getvalue())) assert_array_equal(y_pred, clf.predict(X2)) if cls is not GaussianNB: # TODO re-enable me when partial_fit is implemented for GaussianNB # Test pickling of estimator trained with partial_fit clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2)) clf2.partial_fit(X2[3:], y2[3:]) store = BytesIO() pickle.dump(clf2, store) clf2 = pickle.load(BytesIO(store.getvalue())) assert_array_equal(y_pred, clf2.predict(X2)) def test_input_check_fit(): """Test input checks for the fit method""" for cls in [BernoulliNB, MultinomialNB, GaussianNB]: # check shape consistency for number of samples at fit time assert_raises(ValueError, cls().fit, X2, y2[:-1]) # check shape consistency for number of input features at predict time clf = cls().fit(X2, y2) 
assert_raises(ValueError, clf.predict, X2[:, :-1]) def test_input_check_partial_fit(): for cls in [BernoulliNB, MultinomialNB]: # check shape consistency assert_raises(ValueError, cls().partial_fit, X2, y2[:-1], classes=np.unique(y2)) # classes is required for first call to partial fit assert_raises(ValueError, cls().partial_fit, X2, y2) # check consistency of consecutive classes values clf = cls() clf.partial_fit(X2, y2, classes=np.unique(y2)) assert_raises(ValueError, clf.partial_fit, X2, y2, classes=np.arange(42)) # check consistency of input shape for partial_fit assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2) # check consistency of input shape for predict assert_raises(ValueError, clf.predict, X2[:, :-1]) def test_discretenb_predict_proba(): """Test discrete NB classes' probability scores""" # The 100s below distinguish Bernoulli from multinomial. # FIXME: write a test to show this. X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]] X_multinomial = [[0, 1], [1, 3], [4, 0]] # test binary case (1-d output) y = [0, 0, 2] # 2 is regression test for binary case, 02e673 for cls, X in zip([BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]): clf = cls().fit(X, y) assert_equal(clf.predict(X[-1]), 2) assert_equal(clf.predict_proba(X[0]).shape, (1, 2)) assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1), np.array([1., 1.]), 6) # test multiclass case (2-d output, must sum to one) y = [0, 1, 2] for cls, X in zip([BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]): clf = cls().fit(X, y) assert_equal(clf.predict_proba(X[0]).shape, (1, 3)) assert_equal(clf.predict_proba(X[:2]).shape, (2, 3)) assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1) assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1) assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1) assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1) def test_discretenb_uniform_prior(): """Test whether discrete NB classes fit a uniform prior when fit_prior=False and class_prior=None""" for cls in [BernoulliNB, MultinomialNB]: clf = cls() clf.set_params(fit_prior=False) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_equal(prior, np.array([.5, .5])) def test_discretenb_provide_prior(): """Test whether discrete NB classes use provided prior""" for cls in [BernoulliNB, MultinomialNB]: clf = cls(class_prior=[0.5, 0.5]) clf.fit([[0], [0], [1]], [0, 0, 1]) prior = np.exp(clf.class_log_prior_) assert_array_equal(prior, np.array([.5, .5])) # Inconsistent number of classes with prior assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2]) assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1], classes=[0, 1, 1]) def test_sample_weight_multiclass(): for cls in [BernoulliNB, MultinomialNB]: # check shape consistency for number of samples at fit time yield check_sample_weight_multiclass, cls def check_sample_weight_multiclass(cls): X = [ [0, 0, 1], [0, 1, 1], [0, 1, 1], [1, 0, 0], ] y = [0, 0, 1, 2] sample_weight = np.array([1, 1, 2, 2], dtype=np.float) sample_weight /= sample_weight.sum() clf = cls().fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) # Check sample weight using the partial_fit method clf = cls() clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2], sample_weight=sample_weight[:2]) clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3]) clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:]) assert_array_equal(clf.predict(X), [0, 1, 1, 2]) def test_sample_weight_mnb(): clf = MultinomialNB() 
clf.fit([[1, 2], [1, 2], [1, 0]], [0, 0, 1], sample_weight=[1, 1, 4]) assert_array_equal(clf.predict([1, 0]), [1]) positive_prior = np.exp(clf.intercept_[0]) assert_array_almost_equal([1 - positive_prior, positive_prior], [1 / 3., 2 / 3.]) def test_coef_intercept_shape(): """coef_ and intercept_ should have shapes as in other linear models. Non-regression test for issue #2127. """ X = [[1, 0, 0], [1, 1, 1]] y = [1, 2] # binary classification for clf in [MultinomialNB(), BernoulliNB()]: clf.fit(X, y) assert_equal(clf.coef_.shape, (1, 3)) assert_equal(clf.intercept_.shape, (1,)) def test_check_accuracy_on_digits(): # Non regression test to make sure that any further refactoring / optim # of the NB models do not harm the performance on a slightly non-linearly # separable dataset digits = load_digits() X, y = digits.data, digits.target binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8) X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8] # Multinomial NB scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10) assert_greater(scores.mean(), 0.86) scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10) assert_greater(scores.mean(), 0.94) # Bernoulli NB scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10) assert_greater(scores.mean(), 0.83) scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10) assert_greater(scores.mean(), 0.92) # Gaussian NB scores = cross_val_score(GaussianNB(), X, y, cv=10) assert_greater(scores.mean(), 0.77) scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10) assert_greater(scores.mean(), 0.86)
bsd-3-clause
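Several tests above assert that fitting in chunks with `partial_fit` matches a single `fit` call for the discrete naive Bayes models. A minimal sketch of that pattern with `MultinomialNB`, reusing the same kind of random integer data as the test module.

```
# Minimal sketch of the fit vs. chunked partial_fit equivalence asserted in
# the tests above. `classes` must be passed on the first partial_fit call.
import numpy as np
from sklearn.naive_bayes import MultinomialNB

rng = np.random.RandomState(0)
X = rng.randint(5, size=(6, 100))
y = np.array([1, 1, 2, 2, 3, 3])

clf_full = MultinomialNB().fit(X, y)

clf_chunks = MultinomialNB()
clf_chunks.partial_fit(X[:2], y[:2], classes=np.unique(y))
clf_chunks.partial_fit(X[2:], y[2:])

print(np.allclose(clf_full.predict_proba(X), clf_chunks.predict_proba(X)))  # True
```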
kcompher/BuildingMachineLearningSystemsWithPython
ch05/PosTagFreqVectorizer.py
27
9486
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import re from operator import itemgetter from collections import Mapping import scipy.sparse as sp from sklearn.base import BaseEstimator from sklearn.feature_extraction.text import strip_accents_ascii, strip_accents_unicode import nltk from collections import Counter try: import ujson as json # UltraJSON if available except: import json poscache_filename = "poscache.json" class PosCounter(Counter): def __init__(self, iterable=(), normalize=True, poscache=None, **kwargs): self.n_sents = 0 self.normalize = normalize self.poscache = poscache super(PosCounter, self).__init__(iterable, **kwargs) def update(self, other): """Adds counts for elements in other""" if isinstance(other, self.__class__): self.n_sents += other.n_sents for x, n in other.items(): self[x] += n else: for sent in other: self.n_sents += 1 if self.poscache is not None: if sent in self.poscache: tags = self.poscache[sent] else: self.poscache[sent] = tags = nltk.pos_tag( nltk.word_tokenize(sent)) else: tags = nltk.pos_tag(nltk.word_tokenize(sent)) for x in tags: tok, tag = x self[tag] += 1 if self.normalize: for x, n in self.items(): self[x] /= float(self.n_sents) class PosTagFreqVectorizer(BaseEstimator): """ Convert a collection of raw documents to a matrix Pos tag frequencies """ def __init__(self, input='content', charset='utf-8', charset_error='strict', strip_accents=None, vocabulary=None, normalize=True, dtype=float): self.input = input self.charset = charset self.charset_error = charset_error self.strip_accents = strip_accents if vocabulary is not None: self.fixed_vocabulary = True if not isinstance(vocabulary, Mapping): vocabulary = dict((t, i) for i, t in enumerate(vocabulary)) self.vocabulary_ = vocabulary else: self.fixed_vocabulary = False try: self.poscache = json.load(open(poscache_filename, "r")) except IOError: self.poscache = {} self.normalize = normalize self.dtype = dtype def write_poscache(self): json.dump(self.poscache, open(poscache_filename, "w")) def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. """ if self.input == 'filename': doc = open(doc, 'rb').read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.charset, self.charset_error) return doc def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the however of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif hasattr(self.strip_accents, '__call__'): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) only_prose = lambda s: re.sub('<[^>]*>', '', s).replace("\n", " ") return lambda x: strip_accents(only_prose(x)) def build_tokenizer(self): """Return a function that split a string in sequence of tokens""" return nltk.sent_tokenize def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" preprocess = self.build_preprocessor() tokenize = self.build_tokenizer() return lambda doc: tokenize(preprocess(self.decode(doc))) def _term_count_dicts_to_matrix(self, term_count_dicts): i_indices = [] j_indices = [] values = [] vocabulary = self.vocabulary_ for i, term_count_dict in enumerate(term_count_dicts): for term, count in term_count_dict.items(): j = vocabulary.get(term) if j is not None: i_indices.append(i) j_indices.append(j) values.append(count) # free memory as we go term_count_dict.clear() shape = (len(term_count_dicts), max(vocabulary.values()) + 1) spmatrix = sp.csr_matrix((values, (i_indices, j_indices)), shape=shape, dtype=self.dtype) return spmatrix def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents Parameters ---------- raw_documents: iterable an iterable which yields either str, unicode or file objects Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return the count vectors This is more efficient than calling fit followed by transform. Parameters ---------- raw_documents: iterable an iterable which yields either str, unicode or file objects Returns ------- vectors: array, [n_samples, n_features] """ if self.fixed_vocabulary: # No need to fit anything, directly perform the transformation. # We intentionally don't call the transform method to make it # fit_transform overridable without unwanted side effects in # TfidfVectorizer analyze = self.build_analyzer() term_counts_per_doc = [PosCounter(analyze(doc), normalize=self.normalize, poscache=self.poscache) for doc in raw_documents] return self._term_count_dicts_to_matrix(term_counts_per_doc) self.vocabulary_ = {} # result of document conversion to term count dicts term_counts_per_doc = [] term_counts = Counter() analyze = self.build_analyzer() for doc in raw_documents: term_count_current = PosCounter( analyze(doc), normalize=self.normalize, poscache=self.poscache) term_counts.update(term_count_current) term_counts_per_doc.append(term_count_current) self.write_poscache() terms = set(term_counts) # store map from term name to feature integer index: we sort the term # to have reproducible outcome for the vocabulary structure: otherwise # the mapping from feature name to indices might depend on the memory # layout of the machine. Furthermore sorted terms might make it # possible to perform binary search in the feature names array. self.vocabulary_ = dict(((t, i) for i, t in enumerate(sorted(terms)))) return self._term_count_dicts_to_matrix(term_counts_per_doc) def transform(self, raw_documents): """Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided in the constructor. 
Parameters ---------- raw_documents: iterable an iterable which yields either str, unicode or file objects Returns ------- vectors: sparse matrix, [n_samples, n_features] """ if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") # raw_documents can be an iterable so we don't know its size in # advance # XXX @larsmans tried to parallelize the following loop with joblib. # The result was some 20% slower than the serial version. analyze = self.build_analyzer() term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents] return self._term_count_dicts_to_matrix(term_counts_per_doc) def get_feature_names(self): """Array mapping from feature integer indices to feature name""" if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") return [t for t, i in sorted(iter(self.vocabulary_.items()), key=itemgetter(1))]
mit
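The vectorizer above builds per-document POS-tag counts with `nltk.pos_tag`, optionally normalized by the number of sentences. A minimal sketch of that counting step on a single document; it assumes the NLTK sentence tokenizer and POS tagger resources (e.g. `punkt` and `averaged_perceptron_tagger`) have already been downloaded.

```
# Minimal sketch of the per-document POS-tag counting that PosCounter wraps.
# Assumes the required NLTK data packages are installed via nltk.download(...).
from collections import Counter
import nltk

doc = "The cat sat on the mat. It purred loudly."
sents = nltk.sent_tokenize(doc)

counts = Counter()
for sent in sents:
    for _tok, tag in nltk.pos_tag(nltk.word_tokenize(sent)):
        counts[tag] += 1

# Normalize by sentence count, mirroring PosCounter(normalize=True).
freqs = {tag: n / float(len(sents)) for tag, n in counts.items()}
print(freqs)
```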
bendudson/freegs
06-xpoints.py
1
1546
#!/usr/bin/env python # # Example demonstrating functions for creating and finding X-points import freegs # Plotting routines from freegs.plotting import plotEquilibrium, plotCoils, plotConstraints import matplotlib.pyplot as plt tokamak = freegs.machine.TestTokamak() eq = freegs.Equilibrium(tokamak=tokamak, nx=256,ny=256) ########################################################## # Calculate currents in coils to create X-points # in specified locations # xpoints = [(1.1, -0.8), # (R,Z) locations of X-points (1.1, 0.8)] control = freegs.control.constrain(xpoints=xpoints) control(eq) # Apply control to Equilibrium eq psi = eq.psi() print("=> Solved coil currents, created X-points") ax = plotEquilibrium(eq, show=False) plotCoils(tokamak.coils, axis=ax) plotConstraints(control, axis=ax) plt.show() ########################################################## # Find critical points (O- and X-points) # # import freegs.critical as critical opt, xpt = critical.find_critical(eq.R, eq.Z, psi) print("=> Found O- and X-points") ax = plotEquilibrium(eq, show=False, oxpoints=False) for r,z,_ in xpt: ax.plot(r,z,'ro') for r,z,_ in opt: ax.plot(r,z,'go') psi_bndry = xpt[0][2] sep_contour=ax.contour(eq.R, eq.Z,psi, levels=[psi_bndry], colors='r') plt.show() ########################################################## # Create a mask array, 1 in the core and 0 outside # # mask = critical.core_mask(eq.R, eq.Z, psi, opt, xpt) print("=> Created X-point mask") plt.contourf(eq.R, eq.Z, mask) plt.show()
lgpl-3.0
lazywei/scikit-learn
benchmarks/bench_plot_omp_lars.py
266
4447
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle regression (:ref:`least_angle_regression`) The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path, orthogonal_mp from sklearn.datasets.samples_generator import make_sparse_coded_signal def compute_bench(samples_range, features_range): it = 0 results = dict() lars = np.empty((len(features_range), len(samples_range))) lars_gram = lars.copy() omp = lars.copy() omp_gram = lars.copy() max_it = len(samples_range) * len(features_range) for i_s, n_samples in enumerate(samples_range): for i_f, n_features in enumerate(features_range): it += 1 n_informative = n_features / 10 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') # dataset_kwargs = { # 'n_train_samples': n_samples, # 'n_test_samples': 2, # 'n_features': n_features, # 'n_informative': n_informative, # 'effective_rank': min(n_samples, n_features) / 10, # #'effective_rank': None, # 'bias': 0.0, # } dataset_kwargs = { 'n_samples': 1, 'n_components': n_features, 'n_features': n_samples, 'n_nonzero_coefs': n_informative, 'random_state': 0 } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) y, X, _ = make_sparse_coded_signal(**dataset_kwargs) X = np.asfortranarray(X) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars_gram[i_f, i_s] = delta gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, Gram=None, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (with Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp_gram[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (without Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=False, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp[i_f, i_s] = delta results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram) results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp) return results if __name__ == '__main__': samples_range = np.linspace(1000, 5000, 5).astype(np.int) features_range = np.linspace(1000, 5000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(np.max(t) for t in results.values()) import pylab as pl fig = pl.figure('scikit-learn OMP vs. LARS benchmark results') for i, (label, timings) in enumerate(sorted(results.iteritems())): ax = fig.add_subplot(1, 2, i) vmax = max(1 - timings.min(), -1 + timings.max()) pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax) ax.set_xticklabels([''] + map(str, samples_range)) ax.set_yticklabels([''] + map(str, features_range)) pl.xlabel('n_samples') pl.ylabel('n_features') pl.title(label) pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63) ax = pl.axes([0.1, 0.08, 0.8, 0.06]) pl.colorbar(cax=ax, orientation='horizontal') pl.show()
bsd-3-clause
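The benchmark above times `lars_path` against `orthogonal_mp` over a grid of problem sizes. A single-problem sketch of the same comparison; it assumes the older `make_sparse_coded_signal` orientation this file relies on (signal `y` of length `n_features`, dictionary `X` of shape `(n_features, n_components)`), which changed in recent scikit-learn releases.

```
# Single-problem sketch of the OMP vs. LARS timing comparison in the
# benchmark above, under the older make_sparse_coded_signal orientation.
from time import time
import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.linear_model import lars_path, orthogonal_mp

y, X, _ = make_sparse_coded_signal(n_samples=1, n_components=512,
                                   n_features=100, n_nonzero_coefs=17,
                                   random_state=0)
X = np.asfortranarray(X)

t0 = time()
lars_path(X, y, max_iter=17)
print("lars_path:     %.3fs" % (time() - t0))

t0 = time()
orthogonal_mp(X, y, n_nonzero_coefs=17)
print("orthogonal_mp: %.3fs" % (time() - t0))
```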
Rohisha/classify-satellite-imagery
vggnet_utils.py
1
12721
import argparse import os import tensorflow as tf import tensorflow.contrib.slim as slim import tensorflow.contrib.slim.nets import pandas as pd import random from tqdm import tqdm import numpy as np from sklearn.metrics import fbeta_score VGG_MEAN = [123.68, 116.78, 103.94] """ Setup: Uses tf.contrib.data module which is in release candidate 1.2.0rc0 Based on: - PyTorch example from Justin Johnson: https://gist.github.com/jcjohnson/6e41e8512c17eae5da50aebef3378a4c - https://gist.github.com/omoindrot/dedc857cdc0e680dfb1be99762990c9c Required packages: tensorflow (v1.2) You can install the release candidate 1.2.0rc0 here: https://www.tensorflow.org/versions/r1.2/install/ Download the weights trained on ImageNet for VGG: ``` wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz tar -xvf vgg_16_2016_08_28.tar.gz rm vgg_16_2016_08_28.tar.gz ``` """ def testing(): print("done") return def list_images(directory): """ Get all the images and labels from data/train_v2.csv""" filenames_targets = pd.read_csv('data/train_v2.csv') # column1: image_name, column2: tags (labels for image file) filenames = [directory + file + '.jpg' for file in filenames_targets['image_name'].tolist()] labels = filenames_targets['tags'].tolist() # Convert to one-hot labels flatten = lambda l: [item for sublist in l for item in sublist] labels = list(set(flatten([l.split(' ') for l in filenames_targets['tags'].values]))) label_map = {l: i for i, l in enumerate(labels)} inv_label_map = {i: l for l, i in label_map.items()} one_hot_labels = [] for f, tags in tqdm(filenames_targets.values, miniters=1000): targets = [0]*17 #np.zeros(17) for t in tags.split(' '): targets[label_map[t]] = 1 one_hot_labels.append(targets) print("listed") return filenames, one_hot_labels # [:1000] # change to F score ''' def check_accuracy(sess, correct_prediction, is_training, dataset_init_op): """ Check the accuracy of the model on either train or val (depending on dataset_init_op). 
""" # Initialize the correct dataset sess.run(dataset_init_op) num_correct, num_samples = 0, 0 while True: try: correct_pred = sess.run(correct_prediction, {is_training: False}) num_correct += correct_pred.sum() num_samples += correct_pred.shape[0] except tf.errors.OutOfRangeError: break # Return the fraction of datapoints that were correctly classified acc = float(num_correct) / num_samples return acc ''' def split_samples(all_filenames, all_labels): """ Split all filenames and labels into training and test sets, return both""" n = len(all_filenames) order = random.sample(range(n), n) all_filenames_random = [all_filenames[i] for i in order] all_labels_random = [all_labels[i] for i in order] third = int(n/3) val_filenames = all_filenames_random[:third] val_labels = all_labels_random[:third] train_filenames = all_filenames_random[third:] train_labels = all_labels_random[third:] return train_filenames, train_labels, val_filenames, val_labels # Preprocessing (for both training and validation): # (1) Decode the image from jpg format # (2) Resize the image so its smaller side is 256 pixels long def _parse_function(filename, label): image_string = tf.read_file(filename) image_decoded = tf.image.decode_jpeg(image_string, channels=3) # (1) image = tf.cast(image_decoded, tf.float32) smallest_side = 256.0 height, width = tf.shape(image)[0], tf.shape(image)[1] height = tf.to_float(height) width = tf.to_float(width) scale = tf.cond(tf.greater(height, width), lambda: smallest_side / width, lambda: smallest_side / height) new_height = tf.to_int32(height * scale) new_width = tf.to_int32(width * scale) resized_image = tf.image.resize_images(image, [new_height, new_width]) # (2) return resized_image, label # Preprocessing (for training) # (3) Take a random 224x224 crop to the scaled image # (4) Horizontally flip the image with probability 1/2 # (5) Substract the per color mean `VGG_MEAN` # Note: we don't normalize the data here, as VGG was trained without normalization def training_preprocess(image, label): crop_image = tf.random_crop(image, [224, 224, 3]) # (3) flip_image = tf.image.random_flip_left_right(crop_image) # (4) means = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3]) centered_image = flip_image - means # (5) return centered_image, label # Preprocessing (for validation) # (3) Take a central 224x224 crop to the scaled image # (4) Substract the per color mean `VGG_MEAN` # Note: we don't normalize the data here, as VGG was trained without normalization def val_preprocess(image, label): crop_image = tf.image.resize_image_with_crop_or_pad(image, 224, 224) # (3) means = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3]) centered_image = crop_image - means # (4) return centered_image, label def define_graph(args): """Defines the computational graph for the transfer learning model""" # Get the list of filenames and corresponding list of labels for training et validation # train_filenames, train_labels = list_images(args.train_dir) # val_filenames, val_labels = list_images(args.val_dir) all_filenames, all_labels = list_images(args.train_dir) train_filenames, train_labels, val_filenames, val_labels = split_samples(all_filenames, all_labels) num_classes = 17 # -------------------------------------------------------------------------- # In TensorFlow, you first want to define the computation graph with all the # necessary operations: loss, training op, accuracy... 
# Any tensor created in the `graph.as_default()` scope will be part of `graph` graph = tf.Graph() with graph.as_default(): # Standard preprocessing for VGG on ImageNet taken from here: # https://github.com/tensorflow/models/blob/master/slim/preprocessing/vgg_preprocessing.py # Also see the VGG paper for more details: https://arxiv.org/pdf/1409.1556.pdf # ---------------------------------------------------------------------- # DATASET CREATION using tf.contrib.data.Dataset # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/data # The tf.contrib.data.Dataset framework uses queues in the background to feed in # data to the model. # We initialize the dataset with a list of filenames and labels, and then apply # the preprocessing functions described above. # Behind the scenes, queues will load the filenames, preprocess them with multiple # threads and apply the preprocessing in parallel, and then batch the data # Training dataset train_filenames = tf.constant(train_filenames) train_labels = tf.constant(train_labels) train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_filenames, train_labels)) train_dataset = train_dataset.map(_parse_function, num_threads=args.num_workers, output_buffer_size=args.batch_size) train_dataset = train_dataset.map(training_preprocess, num_threads=args.num_workers, output_buffer_size=args.batch_size) train_dataset = train_dataset.shuffle(buffer_size=10000) # don't forget to shuffle batched_train_dataset = train_dataset.batch(args.batch_size) # Validation dataset val_filenames = tf.constant(val_filenames) val_labels = tf.constant(val_labels) val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_filenames, val_labels)) val_dataset = val_dataset.map(_parse_function, num_threads=args.num_workers, output_buffer_size=args.batch_size) val_dataset = val_dataset.map(val_preprocess, num_threads=args.num_workers, output_buffer_size=args.batch_size) batched_val_dataset = val_dataset.batch(args.batch_size) print("dataset created") # Now we define an iterator that can operator on either dataset. # The iterator can be reinitialized by calling: # - sess.run(train_init_op) for 1 epoch on the training set # - sess.run(val_init_op) for 1 epoch on the valiation set # Once this is done, we don't need to feed any value for images and labels # as they are automatically pulled out from the iterator queues. # A reinitializable iterator is defined by its structure. We could use the # `output_types` and `output_shapes` properties of either `train_dataset` # or `validation_dataset` here, because they are compatible. iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types, batched_train_dataset.output_shapes) images, labels = iterator.get_next() train_init_op = iterator.make_initializer(batched_train_dataset) val_init_op = iterator.make_initializer(batched_val_dataset) # Indicates whether we are in training or in test mode is_training = tf.placeholder(tf.bool) # --------------------------------------------------------------------- # Now that we have set up the data, it's time to set up the model. # For this example, we'll use VGG-16 pretrained on ImageNet. We will remove the # last fully connected layer (fc8) and replace it with our own, with an # output size num_classes=8 # We will first train the last layer for a few epochs. # Then we will train the entire model on our dataset for a few epochs. 
# Get the pretrained model, specifying the num_classes argument to create a new # fully connected replacing the last one, called "vgg_16/fc8" # Each model has a different architecture, so "vgg_16/fc8" will change in another model. # Here, logits gives us directly the predicted scores we wanted from the images. # We pass a scope to initialize "vgg_16/fc8" weights with he_initializer vgg = tf.contrib.slim.nets.vgg with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=args.weight_decay)): logits, _ = vgg.vgg_16(images, num_classes=num_classes, is_training=is_training, dropout_keep_prob=args.dropout_keep_prob) # Specify where the model checkpoint is (pretrained weights). model_path = args.model_path assert(os.path.isfile(model_path)) # Restore only the layers up to fc7 (included) # Calling function `init_fn(sess)` will load all the pretrained weights. variables_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=['vgg_16/fc8']) init_fn = tf.contrib.framework.assign_from_checkpoint_fn(model_path, variables_to_restore) # Initialization operation from scratch for the new "fc8" layers # `get_variables` will only return the variables whose name starts with the given pattern fc8_variables = tf.contrib.framework.get_variables('vgg_16/fc8') fc8_init = tf.variables_initializer(fc8_variables) # --------------------------------------------------------------------- # Using tf.losses, any loss is added to the tf.GraphKeys.LOSSES collection # We can then call the total loss easily # tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits) tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits) # softmax cross entropy loss so can have labels with multiple classes loss = tf.losses.get_total_loss() # First we want to train only the reinitialized last layer fc8 for a few epochs. # We run minimize the loss only with respect to the fc8 variables (weight and bias). fc8_optimizer = tf.train.GradientDescentOptimizer(args.learning_rate1) fc8_train_op = fc8_optimizer.minimize(loss, var_list=fc8_variables) # Then we want to finetune the entire model for a few epochs. # We run minimize the loss only with respect to all the variables. full_optimizer = tf.train.GradientDescentOptimizer(args.learning_rate2) full_train_op = full_optimizer.minimize(loss) tf.get_default_graph().finalize() return graph, init_fn, fc8_init, fc8_optimizer, fc8_train_op, loss
gpl-3.0
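The preprocessing comments above describe resizing so the smaller side is 256 px, taking a 224x224 crop, and subtracting the per-channel `VGG_MEAN`. A framework-free NumPy sketch of the crop-and-center step; the mean values come from the file, while the random image stands in for a real decoded JPEG.

```
# Framework-free sketch of the VGG validation preprocessing described above:
# central 224x224 crop plus per-channel mean subtraction (no normalization,
# matching how VGG was trained).
import numpy as np

VGG_MEAN = np.array([123.68, 116.78, 103.94], dtype=np.float32)

def central_crop_and_center(image, size=224):
    h, w, _ = image.shape
    top, left = (h - size) // 2, (w - size) // 2
    crop = image[top:top + size, left:left + size, :].astype(np.float32)
    return crop - VGG_MEAN.reshape(1, 1, 3)

image = np.random.randint(0, 256, size=(256, 341, 3), dtype=np.uint8)
print(central_crop_and_center(image).shape)  # (224, 224, 3)
```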
foreni-packages/golismero
thirdparty_libs/nltk/draw/dispersion.py
17
1693
# Natural Language Toolkit: Dispersion Plots # # Copyright (C) 2001-2012 NLTK Project # Author: Steven Bird <[email protected]> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT """ A utility for displaying lexical dispersion. """ def dispersion_plot(text, words, ignore_case=False): """ Generate a lexical dispersion plot. :param text: The source text :type text: list(str) or enum(str) :param words: The target words :type words: list of str :param ignore_case: flag to set if case should be ignored when searching text :type ignore_case: bool """ try: import pylab except ImportError: raise ValueError('The plot function requires the matplotlib package (aka pylab).' 'See http://matplotlib.sourceforge.net/') text = list(text) words.reverse() if ignore_case: words_to_comp = map(str.lower, words) text_to_comp = map(str.lower, text) else: words_to_comp = words text_to_comp = text points = [(x,y) for x in range(len(text_to_comp)) for y in range(len(words_to_comp)) if text_to_comp[x] == words_to_comp[y]] if points: x, y = zip(*points) else: x = y = () pylab.plot(x, y, "b|", scalex=.1) pylab.yticks(range(len(words)), words, color="b") pylab.ylim(-1, len(words)) pylab.title("Lexical Dispersion Plot") pylab.xlabel("Word Offset") pylab.show() if __name__ == '__main__': from nltk.corpus import gutenberg words = ['Elinor', 'Marianne', 'Edward', 'Willoughby'] dispersion_plot(gutenberg.words('austen-sense.txt'), words)
gpl-2.0
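`dispersion_plot` above marks, for each target word, every text offset at which it occurs; the core of the function is a scan that collects `(offset, word-index)` pairs. A minimal sketch of that step with plain matplotlib and a toy text.

```
# Minimal sketch of the offset-matching step behind dispersion_plot: collect
# (text offset, target-word index) pairs and hand them to matplotlib.
import matplotlib.pyplot as plt

text = "the cat sat on the mat near the other cat".split()
words = ["cat", "mat"]

points = [(x, y)
          for x, tok in enumerate(text)
          for y, w in enumerate(words)
          if tok == w]
xs, ys = zip(*points) if points else ((), ())

plt.plot(xs, ys, "b|", markersize=15)
plt.yticks(range(len(words)), words)
plt.ylim(-1, len(words))
plt.xlabel("Word Offset")
plt.title("Lexical Dispersion Plot")
plt.show()
```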
virneo/opencog
opencog/python/spatiotemporal/demo.py
33
1221
__author__ = 'sebastian' from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium from spatiotemporal.temporal_events.relation_formulas import FormulaCreator from spatiotemporal.temporal_events.composition.non_linear_least_squares import DecompositionFitter import matplotlib.pyplot as plt all_relations = "pmoFDseSdfOMP" a = TemporalEventTrapezium(1, 12, 4, 8) b = TemporalEventTrapezium(9, 17, 13, 15) # compute relations between events temporal_relations = a * b print("Relations: {0}".format(temporal_relations.to_list())) # print degree for every relation for relation in all_relations: print(relation, temporal_relations[relation]) # plot events a.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1) b.plot(show_distributions=True).figure() plt.show() # from the 13 relations, learns parameters for all combinations of the # before, same, and after relationships between the beginning and # ending distributions of the two intervals formula = FormulaCreator(DecompositionFitter(temporal_relations)) # from these relationships, computes the 13 relations again relations_estimate = formula.calculate_relations() print("Estimated relations: {0}".format(relations_estimate.to_list()))
agpl-3.0
soulmachine/scikit-learn
sklearn/metrics/scorer.py
5
12618
""" The :mod:`sklearn.metrics.scorer` submodule implements a flexible interface for model selection and evaluation using arbitrary score functions. A scorer object is a callable that can be passed to :class:`sklearn.grid_search.GridSearchCV` or :func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter, to specify how a model should be evaluated. The signature of the call is ``(estimator, X, y)`` where ``estimator`` is the model to be evaluated, ``X`` is the test data and ``y`` is the ground truth labeling (or ``None`` in the case of unsupervised models). """ # Authors: Andreas Mueller <[email protected]> # Lars Buitinck <[email protected]> # Arnaud Joly <[email protected]> # License: Simplified BSD from abc import ABCMeta, abstractmethod import numpy as np from . import (r2_score, mean_absolute_error, mean_squared_error, accuracy_score, f1_score, roc_auc_score, average_precision_score, precision_score, recall_score, log_loss) from .cluster import adjusted_rand_score from ..utils.multiclass import type_of_target from ..externals import six class _BaseScorer(six.with_metaclass(ABCMeta, object)): def __init__(self, score_func, sign, kwargs): self._kwargs = kwargs self._score_func = score_func self._sign = sign @abstractmethod def __call__(self, estimator, X, y, sample_weight=None): pass def __repr__(self): kwargs_string = "".join([", %s=%s" % (str(k), str(v)) for k, v in self._kwargs.items()]) return ("make_scorer(%s%s%s%s)" % (self._score_func.__name__, "" if self._sign > 0 else ", greater_is_better=False", self._factory_args(), kwargs_string)) def _factory_args(self): """Return non-default make_scorer arguments for repr.""" return "" class _PredictScorer(_BaseScorer): def __call__(self, estimator, X, y_true, sample_weight=None): """Evaluate predicted target values for X relative to y_true. Parameters ---------- estimator : object Trained estimator to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to estimator.predict. y_true : array-like Gold standard target values for X. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_pred = estimator.predict(X) if sample_weight is not None: return self._sign * self._score_func(y_true, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y_true, y_pred, **self._kwargs) class _ProbaScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate predicted probabilities for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not probabilities. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. 
""" y_pred = clf.predict_proba(X) if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_proba=True" class _ThresholdScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate decision function output for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have either a decision_function method or a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.decision_function or clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not decision function values. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_type = type_of_target(y) if y_type not in ("binary", "multilabel-indicator"): raise ValueError("{0} format is not supported".format(y_type)) try: y_pred = clf.decision_function(X) # For multi-output multi-class estimator if isinstance(y_pred, list): y_pred = np.vstack(p for p in y_pred).T except (NotImplementedError, AttributeError): y_pred = clf.predict_proba(X) if y_type == "binary": y_pred = y_pred[:, 1] elif isinstance(y_pred, list): y_pred = np.vstack([p[:, -1] for p in y_pred]).T if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_threshold=True" def get_scorer(scoring): if isinstance(scoring, six.string_types): try: scorer = SCORERS[scoring] except KeyError: raise ValueError('%r is not a valid scoring value. ' 'Valid options are %s' % (scoring, sorted(SCORERS.keys()))) else: scorer = scoring return scorer def _passthrough_scorer(estimator, *args, **kwargs): """Function that wraps estimator.score""" return estimator.score(*args, **kwargs) def check_scoring(estimator, scoring=None, allow_none=False, score_overrides_loss=False): """Determine scorer from user options. A TypeError will be thrown if the estimator cannot be scored. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. allow_none : boolean, optional, default: False If no scoring is specified and the estimator has no score function, we can either return None or raise an exception. Returns ------- scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. """ has_scoring = scoring is not None if not hasattr(estimator, 'fit'): raise TypeError("estimator should a be an estimator implementing " "'fit' method, %r was passed" % estimator) elif hasattr(estimator, 'predict') and has_scoring: return get_scorer(scoring) elif hasattr(estimator, 'score'): return _passthrough_scorer elif not has_scoring: if allow_none: return None raise TypeError( "If no scoring is specified, the estimator passed should " "have a 'score' method. The estimator %r does not." % estimator) else: raise TypeError( "The estimator passed should have a 'score' or a 'predict' " "method. The estimator %r does not." 
% estimator) def make_scorer(score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs): """Make a scorer from a performance metric or loss function. This factory function wraps scoring functions for use in GridSearchCV and cross_val_score. It takes a score function, such as ``accuracy_score``, ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision`` and returns a callable that scores an estimator's output. Parameters ---------- score_func : callable, Score function (or loss function) with signature ``score_func(y, y_pred, **kwargs)``. greater_is_better : boolean, default=True Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. needs_proba : boolean, default=False Whether score_func requires predict_proba to get probability estimates out of a classifier. needs_threshold : boolean, default=False Whether score_func takes a continuous decision certainty. This only works for binary classification using estimators that have either a decision_function or predict_proba method. For example ``average_precision`` or the area under the roc curve can not be computed using discrete predictions alone. **kwargs : additional arguments Additional parameters to be passed to score_func. Returns ------- scorer : callable Callable object that returns a scalar score; greater is better. Examples -------- >>> from sklearn.metrics import fbeta_score, make_scorer >>> ftwo_scorer = make_scorer(fbeta_score, beta=2) >>> ftwo_scorer make_scorer(fbeta_score, beta=2) >>> from sklearn.grid_search import GridSearchCV >>> from sklearn.svm import LinearSVC >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]}, ... scoring=ftwo_scorer) """ sign = 1 if greater_is_better else -1 if needs_proba and needs_threshold: raise ValueError("Set either needs_proba or needs_threshold to True," " but not both.") if needs_proba: cls = _ProbaScorer elif needs_threshold: cls = _ThresholdScorer else: cls = _PredictScorer return cls(score_func, sign, kwargs) # Standard regression scores r2_scorer = make_scorer(r2_score) mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False) mean_absolute_error_scorer = make_scorer(mean_absolute_error, greater_is_better=False) # Standard Classification Scores accuracy_scorer = make_scorer(accuracy_score) f1_scorer = make_scorer(f1_score) # Score functions that need decision values roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_threshold=True) average_precision_scorer = make_scorer(average_precision_score, needs_threshold=True) precision_scorer = make_scorer(precision_score) recall_scorer = make_scorer(recall_score) # Score function for probabilistic classification log_loss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True) # Clustering scores adjusted_rand_scorer = make_scorer(adjusted_rand_score) SCORERS = dict(r2=r2_scorer, mean_absolute_error=mean_absolute_error_scorer, mean_squared_error=mean_squared_error_scorer, accuracy=accuracy_scorer, f1=f1_scorer, roc_auc=roc_auc_scorer, average_precision=average_precision_scorer, precision=precision_scorer, recall=recall_scorer, log_loss=log_loss_scorer, adjusted_rand_score=adjusted_rand_scorer)
bsd-3-clause
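A minimal, self-contained sketch of the scorer convention described above: a scorer is called as ``scorer(estimator, X, y)``, and a loss wrapped with ``greater_is_better=False`` comes back sign-flipped. The tiny dataset and model below are invented for illustration.

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import make_scorer, mean_squared_error

# Toy regression problem (values invented for the illustration).
X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0.0, 1.1, 1.9, 3.2])
est = LinearRegression().fit(X, y)

# greater_is_better=False flips the sign so that "greater is better" still
# holds during model selection; the scorer is called as (estimator, X, y).
neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
print(neg_mse(est, X, y))                       # -MSE of est on (X, y)
print(-mean_squared_error(y, est.predict(X)))   # same value, computed by hand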
J535D165/recordlinkage
tests/test_compare.py
1
50974
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import pickle import shutil import sys import tempfile import numpy as np from numpy import arange, nan import pandas.testing as pdt from pandas import DataFrame, MultiIndex, Series, to_datetime # dependencies testing specific import pytest import recordlinkage from recordlinkage.base import BaseCompareFeature STRING_SIM_ALGORITHMS = [ 'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein', 'levenshtein', 'lcs', 'smith_waterman' ] NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss'] FIRST_NAMES = [ u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin', u'Tyler', u'Yvonne', nan ] LAST_NAMES = [ u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez', u'Rivera', nan, u'Crane', u'Padilla' ] STREET = [ u'Oliver Neck', nan, u'Melissa Way', u'Sara Dale', u'Keith Green', u'Olivia Terrace', u'Williams Trail', u'Durham Mountains', u'Anna Circle', u'Michelle Squares' ] JOB = [ u'Designer, multimedia', u'Designer, blown glass/stained glass', u'Chiropractor', u'Engineer, mining', u'Quantity surveyor', u'Phytotherapist', u'Teacher, English as a foreign language', u'Electrical engineer', u'Research officer, government', u'Economist' ] AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46] # Run all tests in this file with: # nosetests tests/test_compare.py class TestData(object): @classmethod def setup_class(cls): N_A = 100 N_B = 100 cls.A = DataFrame({ 'age': np.random.choice(AGES, N_A), 'given_name': np.random.choice(FIRST_NAMES, N_A), 'lastname': np.random.choice(LAST_NAMES, N_A), 'street': np.random.choice(STREET, N_A) }) cls.B = DataFrame({ 'age': np.random.choice(AGES, N_B), 'given_name': np.random.choice(FIRST_NAMES, N_B), 'lastname': np.random.choice(LAST_NAMES, N_B), 'street': np.random.choice(STREET, N_B) }) cls.A.index.name = 'index_df1' cls.B.index.name = 'index_df2' cls.index_AB = MultiIndex.from_arrays( [arange(len(cls.A)), arange(len(cls.B))], names=[cls.A.index.name, cls.B.index.name]) # Create a temporary directory cls.test_dir = tempfile.mkdtemp() @classmethod def teardown_class(cls): # Remove the test directory shutil.rmtree(cls.test_dir) class TestCompareApi(TestData): """General unittest for the compare API.""" def test_repr(self): comp = recordlinkage.Compare() comp.exact('given_name', 'given_name') comp.string('given_name', 'given_name', method='jaro') comp.numeric('age', 'age', method='step', offset=3, origin=2) comp.numeric('age', 'age', method='step', offset=0, origin=2) c_str = str(comp) c_repr = repr(comp) assert c_str == c_repr start_str = '<{}'.format(comp.__class__.__name__) assert c_str.startswith(start_str) def test_instance_linking(self): comp = recordlinkage.Compare() comp.exact('given_name', 'given_name') comp.string('given_name', 'given_name', method='jaro') comp.numeric('age', 'age', method='step', offset=3, origin=2) comp.numeric('age', 'age', method='step', offset=0, origin=2) result = comp.compute(self.index_AB, self.A, self.B) # returns a Series assert isinstance(result, DataFrame) # resulting series has a MultiIndex assert isinstance(result.index, MultiIndex) # indexnames are oke assert result.index.names == [self.A.index.name, self.B.index.name] assert len(result) == len(self.index_AB) def test_instance_dedup(self): comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.numeric('age', 'age', method='step', offset=3, origin=2) comp.numeric('age', 'age', method='step', offset=0, origin=2) result = 
comp.compute(self.index_AB, self.A) # returns a Series assert isinstance(result, DataFrame) # resulting series has a MultiIndex assert isinstance(result.index, MultiIndex) # indexnames are oke assert result.index.names == [self.A.index.name, self.B.index.name] assert len(result) == len(self.index_AB) def test_label_linking(self): comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2: np.ones(len(s1), dtype=np.int), 'given_name', 'given_name', label='my_feature_label') result = comp.compute(self.index_AB, self.A, self.B) assert "my_feature_label" in result.columns.tolist() def test_label_dedup(self): comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2: np.ones(len(s1), dtype=np.int), 'given_name', 'given_name', label='my_feature_label') result = comp.compute(self.index_AB, self.A) assert "my_feature_label" in result.columns.tolist() def test_multilabel_none_linking(self): def ones_np_multi(s1, s2): return np.ones(len(s1)), np.ones((len(s1), 3)) def ones_pd_multi(s1, s2): return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3)))) comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized( ones_np_multi, 'given_name', 'given_name') comp.compare_vectorized( ones_pd_multi, 'given_name', 'given_name') result = comp.compute(self.index_AB, self.A, self.B) assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \ result.columns.tolist() def test_multilabel_linking(self): def ones_np_multi(s1, s2): return np.ones(len(s1)), np.ones((len(s1), 3)) def ones_pd_multi(s1, s2): return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3)))) comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized( ones_np_multi, 'given_name', 'given_name', label=['a', ['b', 'c', 'd']]) comp.compare_vectorized( ones_pd_multi, 'given_name', 'given_name', label=['e', ['f', 'g', 'h']]) result = comp.compute(self.index_AB, self.A, self.B) assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \ result.columns.tolist() def test_multilabel_dedup(self): def ones_np_multi(s1, s2): return np.ones(len(s1)), np.ones((len(s1), 3)) def ones_pd_multi(s1, s2): return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3)))) comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized( ones_np_multi, 'given_name', 'given_name', label=['a', ['b', 'c', 'd']]) comp.compare_vectorized( ones_pd_multi, 'given_name', 'given_name', label=['e', ['f', 'g', 'h']]) result = comp.compute(self.index_AB, self.A) assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \ result.columns.tolist() def test_multilabel_none_dedup(self): def ones_np_multi(s1, s2): return np.ones(len(s1)), np.ones((len(s1), 3)) def ones_pd_multi(s1, s2): return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3)))) comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized( ones_np_multi, 'given_name', 'given_name') comp.compare_vectorized( ones_pd_multi, 'given_name', 'given_name') result = comp.compute(self.index_AB, self.A) assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \ result.columns.tolist() def test_multilabel_error_dedup(self): def ones(s1, s2): return np.ones((len(s1), 2)) comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized( ones, 'given_name', 'given_name', label=['a', 'b', 'c']) with pytest.raises(ValueError): comp.compute(self.index_AB, self.A) def 
test_incorrect_collabels_linking(self): comp = recordlinkage.Compare() comp.string('given_name', 'given_name', method='jaro') comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int), "given_name", "not_existing_label") with pytest.raises(KeyError): comp.compute(self.index_AB, self.A, self.B) def test_incorrect_collabels_dedup(self): comp = recordlinkage.Compare() comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int), "given_name", "not_existing_label") with pytest.raises(KeyError): comp.compute(self.index_AB, self.A) def test_compare_custom_vectorized_linking(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # test without label comp = recordlinkage.Compare() comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int), 'col', 'col') result = comp.compute(ix, A, B) expected = DataFrame([1, 1, 1, 1, 1], index=ix) pdt.assert_frame_equal(result, expected) # test with label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2: np.ones(len(s1), dtype=np.int), 'col', 'col', label='my_feature_label') result = comp.compute(ix, A, B) expected = DataFrame( [1, 1, 1, 1, 1], index=ix, columns=['my_feature_label']) pdt.assert_frame_equal(result, expected) # def test_compare_custom_nonvectorized_linking(self): # A = DataFrame({'col': [1, 2, 3, 4, 5]}) # B = DataFrame({'col': [1, 2, 3, 4, 5]}) # ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # def custom_func(a, b): # return np.int64(1) # # test without label # comp = recordlinkage.Compare() # comp.compare_single( # custom_func, # 'col', # 'col' # ) # result = comp.compute(ix, A, B) # expected = DataFrame([1, 1, 1, 1, 1], index=ix) # pdt.assert_frame_equal(result, expected) # # test with label # comp = recordlinkage.Compare() # comp.compare_single( # custom_func, # 'col', # 'col', # label='test' # ) # result = comp.compute(ix, A, B) # expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test']) # pdt.assert_frame_equal(result, expected) def test_compare_custom_instance_type(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) def call(s1, s2): # this should raise on incorrect types assert isinstance(s1, np.ndarray) assert isinstance(s2, np.ndarray) return np.ones(len(s1), dtype=np.int) comp = recordlinkage.Compare() comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int), 'col', 'col') result = comp.compute(ix, A, B) expected = DataFrame([1, 1, 1, 1, 1], index=ix) pdt.assert_frame_equal(result, expected) def test_compare_custom_vectorized_arguments_linking(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # test without label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col', 5) result = comp.compute(ix, A, B) expected = DataFrame([5, 5, 5, 5, 5], index=ix) pdt.assert_frame_equal(result, expected) # test with label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col', 5, label='test') result = comp.compute(ix, A, B) expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test']) pdt.assert_frame_equal(result, 
expected) # test with kwarg comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col', x=5, label='test') result = comp.compute(ix, A, B) expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test']) pdt.assert_frame_equal(result, expected) def test_compare_custom_vectorized_dedup(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]) # test without label comp = recordlinkage.Compare() comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int), 'col', 'col') result = comp.compute(ix, A) expected = DataFrame([1, 1, 1, 1, 1], index=ix) pdt.assert_frame_equal(result, expected) # test with label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2: np.ones(len(s1), dtype=np.int), 'col', 'col', label='test') result = comp.compute(ix, A) expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test']) pdt.assert_frame_equal(result, expected) def test_compare_custom_vectorized_arguments_dedup(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]]) # test without label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col', 5) result = comp.compute(ix, A) expected = DataFrame([5, 5, 5, 5, 5], index=ix) pdt.assert_frame_equal(result, expected) # test with label comp = recordlinkage.Compare() comp.compare_vectorized( lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col', 5, label='test') result = comp.compute(ix, A) expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test']) pdt.assert_frame_equal(result, expected) def test_parallel_comparing_api(self): # use single job comp = recordlinkage.Compare(n_jobs=1) comp.exact('given_name', 'given_name', label='my_feature_label') result_single = comp.compute(self.index_AB, self.A, self.B) result_single.sort_index(inplace=True) # use two jobs comp = recordlinkage.Compare(n_jobs=2) comp.exact('given_name', 'given_name', label='my_feature_label') result_2processes = comp.compute(self.index_AB, self.A, self.B) result_2processes.sort_index(inplace=True) # compare results pdt.assert_frame_equal(result_single, result_2processes) def test_parallel_comparing(self): # use single job comp = recordlinkage.Compare(n_jobs=1) comp.exact('given_name', 'given_name', label='my_feature_label') result_single = comp.compute(self.index_AB, self.A, self.B) result_single.sort_index(inplace=True) # use two jobs comp = recordlinkage.Compare(n_jobs=2) comp.exact('given_name', 'given_name', label='my_feature_label') result_2processes = comp.compute(self.index_AB, self.A, self.B) result_2processes.sort_index(inplace=True) # use two jobs comp = recordlinkage.Compare(n_jobs=4) comp.exact('given_name', 'given_name', label='my_feature_label') result_4processes = comp.compute(self.index_AB, self.A, self.B) result_4processes.sort_index(inplace=True) # compare results pdt.assert_frame_equal(result_single, result_2processes) pdt.assert_frame_equal(result_single, result_4processes) def test_pickle(self): # test if it is possible to pickle the Compare class comp = recordlinkage.Compare() comp.string('given_name', 'given_name') comp.numeric('number', 'number') comp.geo('lat', 'lng', 'lat', 'lng') comp.date('before', 'after') # do the test pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle') pickle.dump(comp, open(pickle_path, 'wb')) def 
test_manual_parallel_joblib(self): # test if it is possible to pickle the Compare class # This is only available for python 3. For python 2, it is not # possible to pickle instancemethods. A workaround can be found at # https://stackoverflow.com/a/29873604/8727928 if sys.version.startswith("3"): # import joblib dependencies from joblib import Parallel, delayed # split the data into smaller parts len_index = int(len(self.index_AB) / 2) df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]] comp = recordlinkage.Compare() comp.string('given_name', 'given_name') comp.string('lastname', 'lastname') comp.exact('street', 'street') # do in parallel Parallel(n_jobs=2)( delayed(comp.compute)(df_chunks[i], self.A, self.B) for i in [0, 1]) def test_indexing_types(self): # test the two types of indexing # this test needs improvement A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B_reversed = B[::-1].copy() ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)]) # test with label indexing type comp_label = recordlinkage.Compare(indexing_type='label') comp_label.exact('col', 'col') result_label = comp_label.compute(ix, A, B_reversed) # test with position indexing type comp_position = recordlinkage.Compare(indexing_type='position') comp_position.exact('col', 'col') result_position = comp_position.compute(ix, A, B_reversed) assert (result_position.values == 1).all(axis=0) pdt.assert_frame_equal(result_label, result_position) def test_pass_list_of_features(self): from recordlinkage.compare import FrequencyA, VariableA, VariableB # setup datasets and record pairs A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)]) # test with label indexing type features = [ VariableA('col', label='y1'), VariableB('col', label='y2'), FrequencyA('col', label='y3') ] comp_label = recordlinkage.Compare(features=features) result_label = comp_label.compute(ix, A, B) assert list(result_label) == ["y1", "y2", "y3"] class TestCompareFeatures(TestData): def test_feature(self): # test using classes and the base class A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) feature = BaseCompareFeature('col', 'col') feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1)) feature.compute(ix, A, B) def test_feature_multicolumn_return(self): # test using classes and the base class A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) def ones(s1, s2): return DataFrame(np.ones((len(s1), 3))) feature = BaseCompareFeature('col', 'col') feature._f_compare_vectorized = ones result = feature.compute(ix, A, B) assert result.shape == (5, 3) def test_feature_multicolumn_input(self): # test using classes and the base class A = DataFrame({ 'col1': ['abc', 'abc', 'abc', 'abc', 'abc'], 'col2': ['abc', 'abc', 'abc', 'abc', 'abc'] }) B = DataFrame({ 'col1': ['abc', 'abd', 'abc', 'abc', '123'], 'col2': ['abc', 'abd', 'abc', 'abc', '123'] }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2']) feature._f_compare_vectorized = \ lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1)) feature.compute(ix, A, B) class 
TestCompareExact(TestData): """Test the exact comparison method.""" def test_exact_str_type(self): A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']}) B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) expected = DataFrame([1, 0, 1, 1, 0], index=ix) comp = recordlinkage.Compare() comp.exact('col', 'col') result = comp.compute(ix, A, B) pdt.assert_frame_equal(result, expected) def test_exact_num_type(self): A = DataFrame({'col': [42, 42, 41, 43, nan]}) B = DataFrame({'col': [42, 42, 42, 42, 42]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) expected = DataFrame([1, 1, 0, 0, 0], index=ix) comp = recordlinkage.Compare() comp.exact('col', 'col') result = comp.compute(ix, A, B) pdt.assert_frame_equal(result, expected) def test_link_exact_missing(self): A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]}) B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.exact('col', 'col', label='na_') comp.exact('col', 'col', missing_value=0, label='na_0') comp.exact('col', 'col', missing_value=9, label='na_9') comp.exact('col', 'col', missing_value=nan, label='na_na') comp.exact('col', 'col', missing_value='str', label='na_str') result = comp.compute(ix, A, B) # Missing values as default expected = Series([1, 1, 0, 0, 0], index=ix, name='na_') pdt.assert_series_equal(result['na_'], expected) # Missing values as 0 expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0') pdt.assert_series_equal(result['na_0'], expected) # Missing values as 9 expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9') pdt.assert_series_equal(result['na_9'], expected) # Missing values as nan expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na') pdt.assert_series_equal(result['na_na'], expected) # Missing values as string expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str') pdt.assert_series_equal(result['na_str'], expected) def test_link_exact_disagree(self): A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]}) B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.exact('col', 'col', label='d_') comp.exact('col', 'col', disagree_value=0, label='d_0') comp.exact('col', 'col', disagree_value=9, label='d_9') comp.exact('col', 'col', disagree_value=nan, label='d_na') comp.exact('col', 'col', disagree_value='str', label='d_str') result = comp.compute(ix, A, B) # disagree values as default expected = Series([1, 1, 0, 0, 0], index=ix, name='d_') pdt.assert_series_equal(result['d_'], expected) # disagree values as 0 expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0') pdt.assert_series_equal(result['d_0'], expected) # disagree values as 9 expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9') pdt.assert_series_equal(result['d_9'], expected) # disagree values as nan expected = Series([1, 1, nan, 0, 0], index=ix, name='d_na') pdt.assert_series_equal(result['d_na'], expected) # disagree values as string expected = Series([1, 1, 'str', 0, 0], index=ix, name='d_str') pdt.assert_series_equal(result['d_str'], expected) # tests/test_compare.py:TestCompareNumeric class TestCompareNumeric(TestData): """Test the numeric comparison methods.""" def test_numeric(self): A = DataFrame({'col': [1, 1, 1, nan, 0]}) B = DataFrame({'col': [1, 2, 3, nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = 
recordlinkage.Compare() comp.numeric('col', 'col', 'step', offset=2) comp.numeric('col', 'col', method='step', offset=2) comp.numeric('col', 'col', 'step', 2) result = comp.compute(ix, A, B) # Basics expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0) pdt.assert_series_equal(result[0], expected) # Basics expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=1) pdt.assert_series_equal(result[1], expected) # Basics expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=2) pdt.assert_series_equal(result[2], expected) def test_numeric_with_missings(self): A = DataFrame({'col': [1, 1, 1, nan, 0]}) B = DataFrame({'col': [1, 1, 1, nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.numeric('col', 'col', scale=2) comp.numeric('col', 'col', scale=2, missing_value=0) comp.numeric('col', 'col', scale=2, missing_value=123.45) comp.numeric('col', 'col', scale=2, missing_value=nan) comp.numeric('col', 'col', scale=2, missing_value='str') result = comp.compute(ix, A, B) # Missing values as default expected = Series([1.0, 1.0, 1.0, 0.0, 0.0], index=ix, name=0) pdt.assert_series_equal(result[0], expected) # Missing values as 0 expected = Series( [1.0, 1.0, 1.0, 0.0, 0.0], index=ix, dtype=np.float64, name=1) pdt.assert_series_equal(result[1], expected) # Missing values as 123.45 expected = Series([1.0, 1.0, 1.0, 123.45, 123.45], index=ix, name=2) pdt.assert_series_equal(result[2], expected) # Missing values as nan expected = Series([1.0, 1.0, 1.0, nan, nan], index=ix, name=3) pdt.assert_series_equal(result[3], expected) # Missing values as string expected = Series( [1, 1, 1, 'str', 'str'], index=ix, dtype=object, name=4) pdt.assert_series_equal(result[4], expected) @pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS) def test_numeric_algorithms(self, alg): A = DataFrame({'col': [1, 1, 1, 1, 1]}) B = DataFrame({'col': [1, 2, 3, 4, 5]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.numeric('col', 'col', method='step', offset=1, label='step') comp.numeric( 'col', 'col', method='linear', offset=1, scale=2, label='linear') comp.numeric( 'col', 'col', method='squared', offset=1, scale=2, label='squared') comp.numeric( 'col', 'col', method='exp', offset=1, scale=2, label='exp') comp.numeric( 'col', 'col', method='gauss', offset=1, scale=2, label='gauss') result_df = comp.compute(ix, A, B) result = result_df[alg] # All values between 0 and 1. 
assert (result >= 0.0).all() assert (result <= 1.0).all() if alg != 'step': print(alg) print(result) # sim(scale) = 0.5 expected_bool = Series( [False, False, False, True, False], index=ix, name=alg) pdt.assert_series_equal(result == 0.5, expected_bool) # sim(offset) = 1 expected_bool = Series( [True, True, False, False, False], index=ix, name=alg) pdt.assert_series_equal(result == 1.0, expected_bool) # sim(scale) larger than 0.5 expected_bool = Series( [False, False, True, False, False], index=ix, name=alg) pdt.assert_series_equal((result > 0.5) & (result < 1.0), expected_bool) # sim(scale) smaller than 0.5 expected_bool = Series( [False, False, False, False, True], index=ix, name=alg) pdt.assert_series_equal((result < 0.5) & (result >= 0.0), expected_bool) @pytest.mark.parametrize("alg", NUMERIC_SIM_ALGORITHMS) def test_numeric_algorithms_errors(self, alg): # scale negative if alg != "step": with pytest.raises(ValueError): comp = recordlinkage.Compare() comp.numeric('age', 'age', method=alg, offset=2, scale=-2) comp.compute(self.index_AB, self.A, self.B) # offset negative with pytest.raises(ValueError): comp = recordlinkage.Compare() comp.numeric('age', 'age', method=alg, offset=-2, scale=-2) comp.compute(self.index_AB, self.A, self.B) def test_numeric_does_not_exist(self): # raise when algorithm doesn't exists A = DataFrame({'col': [1, 1, 1, nan, 0]}) B = DataFrame({'col': [1, 1, 1, nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.numeric('col', 'col', method='unknown_algorithm') pytest.raises(ValueError, comp.compute, ix, A, B) # tests/test_compare.py:TestCompareDates class TestCompareDates(TestData): """Test the exact comparison method.""" def test_dates(self): A = DataFrame({ 'col': to_datetime( ['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']) }) B = DataFrame({ 'col': to_datetime([ '2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01', '2010/9/30' ]) }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.date('col', 'col') result = comp.compute(ix, A, B)[0] expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name=0) pdt.assert_series_equal(result, expected) def test_date_incorrect_dtype(self): A = DataFrame({ 'col': ['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30'] }) B = DataFrame({ 'col': [ '2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01', '2010/9/30' ] }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) A['col1'] = to_datetime(A['col']) B['col1'] = to_datetime(B['col']) comp = recordlinkage.Compare() comp.date('col', 'col1') pytest.raises(ValueError, comp.compute, ix, A, B) comp = recordlinkage.Compare() comp.date('col1', 'col') pytest.raises(ValueError, comp.compute, ix, A, B) def test_dates_with_missings(self): A = DataFrame({ 'col': to_datetime( ['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']) }) B = DataFrame({ 'col': to_datetime([ '2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01', '2010/9/30' ]) }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.date('col', 'col', label='m_') comp.date('col', 'col', missing_value=0, label='m_0') comp.date('col', 'col', missing_value=123.45, label='m_float') comp.date('col', 'col', missing_value=nan, label='m_na') comp.date('col', 'col', missing_value='str', label='m_str') result = comp.compute(ix, A, B) # Missing values as default expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_') pdt.assert_series_equal(result['m_'], 
expected) # Missing values as 0 expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='m_0') pdt.assert_series_equal(result['m_0'], expected) # Missing values as 123.45 expected = Series([1, 123.45, 0, 0.5, 0.5], index=ix, name='m_float') pdt.assert_series_equal(result['m_float'], expected) # Missing values as nan expected = Series([1, nan, 0, 0.5, 0.5], index=ix, name='m_na') pdt.assert_series_equal(result['m_na'], expected) # Missing values as string expected = Series( [1, 'str', 0, 0.5, 0.5], index=ix, dtype=object, name='m_str') pdt.assert_series_equal(result['m_str'], expected) def test_dates_with_swap(self): months_to_swap = [(9, 10, 123.45), (10, 9, 123.45), (1, 2, 123.45), (2, 1, 123.45)] A = DataFrame({ 'col': to_datetime( ['2005/11/23', nan, '2004/11/23', '2010/01/10', '2010/10/30']) }) B = DataFrame({ 'col': to_datetime([ '2005/11/23', '2010/12/31', '2005/11/23', '2010/10/01', '2010/9/30' ]) }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.date('col', 'col', label='s_') comp.date( 'col', 'col', swap_month_day=0, swap_months='default', label='s_1') comp.date( 'col', 'col', swap_month_day=123.45, swap_months='default', label='s_2') comp.date( 'col', 'col', swap_month_day=123.45, swap_months=months_to_swap, label='s_3') comp.date( 'col', 'col', swap_month_day=nan, swap_months='default', missing_value=nan, label='s_4') comp.date('col', 'col', swap_month_day='str', label='s_5') result = comp.compute(ix, A, B) # swap_month_day as default expected = Series([1, 0, 0, 0.5, 0.5], index=ix, name='s_') pdt.assert_series_equal(result['s_'], expected) # swap_month_day and swap_months as 0 expected = Series([1, 0, 0, 0, 0.5], index=ix, name='s_1') pdt.assert_series_equal(result['s_1'], expected) # swap_month_day 123.45 (float) expected = Series([1, 0, 0, 123.45, 0.5], index=ix, name='s_2') pdt.assert_series_equal(result['s_2'], expected) # swap_month_day and swap_months 123.45 (float) expected = Series([1, 0, 0, 123.45, 123.45], index=ix, name='s_3') pdt.assert_series_equal(result['s_3'], expected) # swap_month_day and swap_months as nan expected = Series([1, nan, 0, nan, 0.5], index=ix, name='s_4') pdt.assert_series_equal(result['s_4'], expected) # swap_month_day as string expected = Series( [1, 0, 0, 'str', 0.5], index=ix, dtype=object, name='s_5') pdt.assert_series_equal(result['s_5'], expected) # tests/test_compare.py:TestCompareGeo class TestCompareGeo(TestData): """Test the geo comparison method.""" def test_geo(self): # Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands) A = DataFrame({ 'lat': [52.0842455, 52.3747388, 51.9280573], 'lng': [5.0124516, 4.7585305, 4.4203581] }) B = DataFrame({ 'lat': [52.3747388, 51.9280573, 52.0842455], 'lng': [4.7585305, 4.4203581, 5.0124516] }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.geo( 'lat', 'lng', 'lat', 'lng', method='step', offset=50) # 50 km range result = comp.compute(ix, A, B) # Missing values as default [36.639460, 54.765854, 44.092472] expected = Series([1.0, 0.0, 1.0], index=ix, name=0) pdt.assert_series_equal(result[0], expected) def test_geo_batch(self): # Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands) A = DataFrame({ 'lat': [52.0842455, 52.3747388, 51.9280573], 'lng': [5.0124516, 4.7585305, 4.4203581] }) B = DataFrame({ 'lat': [52.3747388, 51.9280573, 52.0842455], 'lng': [4.7585305, 4.4203581, 5.0124516] }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.geo( 
'lat', 'lng', 'lat', 'lng', method='step', offset=1, label='step') comp.geo( 'lat', 'lng', 'lat', 'lng', method='linear', offset=1, scale=2, label='linear') comp.geo( 'lat', 'lng', 'lat', 'lng', method='squared', offset=1, scale=2, label='squared') comp.geo( 'lat', 'lng', 'lat', 'lng', method='exp', offset=1, scale=2, label='exp') comp.geo( 'lat', 'lng', 'lat', 'lng', method='gauss', offset=1, scale=2, label='gauss') result_df = comp.compute(ix, A, B) print(result_df) for alg in ['step', 'linear', 'squared', 'exp', 'gauss']: result = result_df[alg] # All values between 0 and 1. assert (result >= 0.0).all() assert (result <= 1.0).all() def test_geo_does_not_exist(self): # Utrecht, Amsterdam, Rotterdam (Cities in The Netherlands) A = DataFrame({ 'lat': [52.0842455, 52.3747388, 51.9280573], 'lng': [5.0124516, 4.7585305, 4.4203581] }) B = DataFrame({ 'lat': [52.3747388, 51.9280573, 52.0842455], 'lng': [4.7585305, 4.4203581, 5.0124516] }) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.geo('lat', 'lng', 'lat', 'lng', method='unknown') pytest.raises(ValueError, comp.compute, ix, A, B) class TestCompareStrings(TestData): """Test the exact comparison method.""" def test_defaults(self): # default algorithm is levenshtein algorithm # test default values are indentical to levenshtein A = DataFrame({ 'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf'] }) B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string('col', 'col', label='default') comp.string('col', 'col', method='levenshtein', label='with_args') result = comp.compute(ix, A, B) pdt.assert_series_equal( result['default'].rename(None), result['with_args'].rename(None) ) def test_fuzzy(self): A = DataFrame({ 'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf'] }) B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string('col', 'col', method='jaro', missing_value=0) comp.string('col', 'col', method='q_gram', missing_value=0) comp.string('col', 'col', method='cosine', missing_value=0) comp.string('col', 'col', method='jaro_winkler', missing_value=0) comp.string('col', 'col', method='dameraulevenshtein', missing_value=0) comp.string('col', 'col', method='levenshtein', missing_value=0) result = comp.compute(ix, A, B) print(result) assert result.notnull().all(1).all(0) assert (result[result.notnull()] >= 0).all(1).all(0) assert (result[result.notnull()] <= 1).all(1).all(0) def test_threshold(self): A = DataFrame({'col': [u"gretzky", u"gretzky99", u"gretzky", u"gretzky"]}) B = DataFrame({'col': [u"gretzky", u"gretzky", nan, u"wayne"]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string( 'col', 'col', method="levenshtein", threshold=0.5, missing_value=2.0, label="x_col1" ) comp.string( 'col', 'col', method="levenshtein", threshold=1.0, missing_value=0.5, label="x_col2" ) comp.string( 'col', 'col', method="levenshtein", threshold=0.0, missing_value=nan, label="x_col3" ) result = comp.compute(ix, A, B) expected = Series([1.0, 1.0, 2.0, 0.0], index=ix, name="x_col1") pdt.assert_series_equal(result["x_col1"], expected) expected = Series([1.0, 0.0, 0.5, 0.0], index=ix, name="x_col2") pdt.assert_series_equal(result["x_col2"], expected) expected = Series([1.0, 1.0, nan, 1.0], index=ix, name="x_col3") 
pdt.assert_series_equal(result["x_col3"], expected) @pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS) def test_incorrect_input(self, alg): A = DataFrame({'col': [1, 1, 1, nan, 0]}) B = DataFrame({'col': [1, 1, 1, nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) with pytest.raises(Exception): comp = recordlinkage.Compare() comp.string('col', 'col', method=alg) comp.compute(ix, A, B) @pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS) def test_string_algorithms_nan(self, alg): A = DataFrame({'col': [u"nan", nan, nan, nan, nan]}) B = DataFrame({'col': [u"nan", nan, nan, nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string('col', 'col', method=alg) result = comp.compute(ix, A, B)[0] expected = Series([1.0, 0.0, 0.0, 0.0, 0.0], index=ix, name=0) pdt.assert_series_equal(result, expected) comp = recordlinkage.Compare() comp.string('col', 'col', method=alg, missing_value=nan) result = comp.compute(ix, A, B)[0] expected = Series([1.0, nan, nan, nan, nan], index=ix, name=0) pdt.assert_series_equal(result, expected) comp = recordlinkage.Compare() comp.string('col', 'col', method=alg, missing_value=9.0) result = comp.compute(ix, A, B)[0] expected = Series([1.0, 9.0, 9.0, 9.0, 9.0], index=ix, name=0) pdt.assert_series_equal(result, expected) @pytest.mark.parametrize("alg", STRING_SIM_ALGORITHMS) def test_string_algorithms(self, alg): A = DataFrame({ 'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf'] }) B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string('col', 'col', method=alg, missing_value=0) result = comp.compute(ix, A, B)[0] assert result.notnull().all() assert (result >= 0).all() assert (result <= 1).all() assert (result > 0).any() assert (result < 1).any() def test_fuzzy_does_not_exist(self): A = DataFrame({ 'col': [u'str_abc', u'str_abc', u'str_abc', nan, u'hsdkf'] }) B = DataFrame({'col': [u'str_abc', u'str_abd', u'jaskdfsd', nan, nan]}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) comp = recordlinkage.Compare() comp.string('col', 'col', method='unknown_algorithm') pytest.raises(ValueError, comp.compute, ix, A, B) class TestCompareFreq(object): def test_freq(self): # data array_repeated = np.repeat(np.arange(10), 10) array_tiled = np.tile(np.arange(20), 5) # convert to pandas data A = DataFrame({'col': array_repeated}) B = DataFrame({'col': array_tiled}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # the part to test from recordlinkage.compare import Frequency, FrequencyA, FrequencyB comp = recordlinkage.Compare() comp.add(Frequency(left_on='col')) comp.add(FrequencyA('col')) result = comp.compute(ix, A, B) expected = Series(np.ones((100, )) / 10, index=ix) pdt.assert_series_equal(result[0], expected.rename(0)) pdt.assert_series_equal(result[1], expected.rename(1)) comp = recordlinkage.Compare() comp.add(Frequency(right_on='col')) comp.add(FrequencyB('col')) result = comp.compute(ix, A, B) expected = Series(np.ones((100, )) / 20, index=ix) pdt.assert_series_equal(result[0], expected.rename(0)) pdt.assert_series_equal(result[1], expected.rename(1)) def test_freq_normalise(self): # data array_repeated = np.repeat(np.arange(10), 10) array_tiled = np.tile(np.arange(20), 5) # convert to pandas data A = DataFrame({'col': array_repeated}) B = DataFrame({'col': array_tiled}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # the 
part to test from recordlinkage.compare import Frequency comp = recordlinkage.Compare() comp.add(Frequency(left_on='col', normalise=False)) result = comp.compute(ix, A, B) expected = DataFrame(np.ones((100, )) * 10, index=ix) pdt.assert_frame_equal(result, expected) comp = recordlinkage.Compare() comp.add(Frequency(right_on='col', normalise=False)) result = comp.compute(ix, A, B) expected = DataFrame(np.ones((100, )) * 5, index=ix) pdt.assert_frame_equal(result, expected) @pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0]) def test_freq_nan(self, missing_value): # data array_repeated = np.repeat(np.arange(10, dtype=np.float64), 10) array_repeated[90:] = np.nan array_tiled = np.tile(np.arange(20, dtype=np.float64), 5) # convert to pandas data A = DataFrame({'col': array_repeated}) B = DataFrame({'col': array_tiled}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # the part to test from recordlinkage.compare import Frequency comp = recordlinkage.Compare() comp.add(Frequency(left_on='col', missing_value=missing_value)) result = comp.compute(ix, A, B) expected_np = np.ones((100, )) / 10 expected_np[90:] = missing_value expected = DataFrame(expected_np, index=ix) pdt.assert_frame_equal(result, expected) class TestCompareVariable(object): def test_variable(self): # data arrayA = np.random.random((100,)) arrayB = np.random.random((100,)) # convert to pandas data A = DataFrame({'col': arrayA}) B = DataFrame({'col': arrayB}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # the part to test from recordlinkage.compare import Variable, VariableA, VariableB comp = recordlinkage.Compare() comp.add(Variable(left_on='col')) comp.add(VariableA('col')) result = comp.compute(ix, A, B) expected = Series(arrayA, index=ix) pdt.assert_series_equal(result[0], expected.rename(0)) pdt.assert_series_equal(result[1], expected.rename(1)) comp = recordlinkage.Compare() comp.add(Variable(right_on='col')) comp.add(VariableB('col')) result = comp.compute(ix, A, B) expected = Series(arrayB, index=ix) pdt.assert_series_equal(result[0], expected.rename(0)) pdt.assert_series_equal(result[1], expected.rename(1)) @pytest.mark.parametrize('missing_value', [0.0, np.nan, 10.0]) def test_variable_nan(self, missing_value): # data arrayA = np.random.random((100,)) arrayA[90:] = np.nan arrayB = np.random.random((100,)) # convert to pandas data A = DataFrame({'col': arrayA}) B = DataFrame({'col': arrayB}) ix = MultiIndex.from_arrays([A.index.values, B.index.values]) # the part to test from recordlinkage.compare import Variable comp = recordlinkage.Compare() comp.add(Variable(left_on='col', missing_value=missing_value)) features = comp.compute(ix, A, B) result = features[0].rename(None) expected = Series(arrayA, index=ix) expected.iloc[90:] = missing_value pdt.assert_series_equal(result, expected)
bsd-3-clause
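A hedged usage sketch of the Compare API that these tests exercise (exact, string, compute); the two toy record sets and the pairwise candidate index are invented for illustration.

import recordlinkage
from pandas import DataFrame, MultiIndex

# Two small record sets to be compared (contents invented).
df_a = DataFrame({'name': ['jonathan', 'jon', 'ann'], 'age': [31, 31, 58]})
df_b = DataFrame({'name': ['jonathan', 'john', 'anne'], 'age': [31, 33, 58]})

# Candidate pairs: here simply record i of df_a against record i of df_b,
# mirroring how the tests build their MultiIndex.
pairs = MultiIndex.from_arrays([df_a.index.values, df_b.index.values])

comp = recordlinkage.Compare()
comp.exact('age', 'age', label='age')
comp.string('name', 'name', method='jaro', label='name_sim')

features = comp.compute(pairs, df_a, df_b)  # one row per pair, one column per feature
print(features)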
larsmans/scikit-learn
examples/neighbors/plot_digits_kde_sampling.py
251
2022
""" ========================= Kernel Density Estimation ========================= This example shows how kernel density estimation (KDE), a powerful non-parametric density estimation technique, can be used to learn a generative model for a dataset. With this generative model in place, new samples can be drawn. These new samples reflect the underlying model of the data. """ import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_digits from sklearn.neighbors import KernelDensity from sklearn.decomposition import PCA from sklearn.grid_search import GridSearchCV # load the data digits = load_digits() data = digits.data # project the 64-dimensional data to a lower dimension pca = PCA(n_components=15, whiten=False) data = pca.fit_transform(digits.data) # use grid search cross-validation to optimize the bandwidth params = {'bandwidth': np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(data) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ # sample 44 new points from the data new_data = kde.sample(44, random_state=0) new_data = pca.inverse_transform(new_data) # turn data into a 4x11 grid new_data = new_data.reshape((4, 11, -1)) real_data = digits.data[:44].reshape((4, 11, -1)) # plot real digits and resampled digits fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[])) for j in range(11): ax[4, j].set_visible(False) for i in range(4): im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation='nearest') im.set_clim(0, 16) ax[0, 5].set_title('Selection from the input data') ax[5, 5].set_title('"New" digits drawn from the kernel density model') plt.show()
bsd-3-clause
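A hedged sketch of the fit / sample / score_samples cycle the example above builds on, reduced to 1-D toy data; the bandwidth of 0.5 is arbitrary here, whereas the example tunes it with GridSearchCV.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.normal(loc=0.0, scale=1.0, size=(200, 1))   # toy 1-D data

kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
new_points = kde.sample(5, random_state=0)    # draw new samples from the fitted model
log_density = kde.score_samples(new_points)   # log p(x) under the fitted KDE
print(new_points.ravel())
print(np.exp(log_density))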
elsuizo/Python_work_git
trayectories_on_phase_portrait.py
1
2023
#! /usr/bin/env python # -*- coding: utf-8 -*- #************************************************************************* # imports #************************************************************************* import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from scipy.integrate import odeint #************************************************************************* def f(X, t): """ Right-hand side of the autonomous planar system; X = (x1, x2), t is unused. """ x1, x2 = X return [x1 - x2 - (x1**2 + (3.0/2.0) * x2**2) * x1, x1 + x2 - (x1**2 + (1.0/2.0) * x2**2) * x2] x1 = np.linspace(-2.0, 2.0, 20) x2 = np.linspace(-2.0, 2.0, 20) X1, X2 = np.meshgrid(x1, x2) t = 0 u, v = np.zeros(X1.shape), np.zeros(X2.shape) NI, NJ = X1.shape for i in range(NI): for j in range(NJ): x = X1[i, j] y = X2[i, j] yprime = f([x, y], t) u[i, j] = yprime[0] v[i, j] = yprime[1] fig, ax = plt.subplots() ellipse = mpl.patches.Ellipse(xy=(0, 0), width=1.7, height=1.5) ax.add_artist(ellipse) plt.quiver(X1, X2, u, v, color='r', pivot='mid', units='x') plt.streamplot(X1, X2, u, v) ax.text(0, 0, r'$\vec{\nabla}\mathbf{f}<0$', fontsize=20) plt.xlabel('$x_1$', fontsize=20) plt.ylabel('$x_2$', fontsize=20) plt.title(r'Phase portrait: $\dot{\mathbf{x}}=\mathbf{f}(\mathbf{x})$', fontsize=15) plt.xlim([-2, 2]) plt.ylim([-2, 2]) #------------------------------------------------------------------------- # button press action: integrate and draw a trajectory from the clicked point #------------------------------------------------------------------------- def on_button_press(event): t_sim = np.linspace(0, 50, 200) x, y = event.xdata, event.ydata # clicked coordinates x0 = [x, y] ys = odeint(f, x0, t_sim) plt.plot(ys[:, 0], ys[:, 1], 'k-', markersize=10) # path plt.plot([ys[0, 0]], [ys[0, 1]], 'o', markersize=10) # start plt.plot([ys[-1, 0]], [ys[-1, 1]], 's', markersize=10) # end fig.canvas.draw() #plt.savefig('phase_portrait.png') fig.canvas.mpl_connect('button_press_event', on_button_press) plt.show()
gpl-3.0
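The click handler above integrates one trajectory per mouse click; the same integration step is shown in isolation below, with an invented initial condition in place of the clicked point.

import numpy as np
from scipy.integrate import odeint

def f(X, t):
    # Same planar vector field as in the script above.
    x1, x2 = X
    return [x1 - x2 - (x1**2 + 1.5 * x2**2) * x1,
            x1 + x2 - (x1**2 + 0.5 * x2**2) * x2]

t_sim = np.linspace(0, 50, 200)
x0 = [0.5, -0.25]                     # would normally come from event.xdata/ydata
trajectory = odeint(f, x0, t_sim)     # shape (200, 2): columns are x1(t), x2(t)
print(trajectory[-1])                 # state at the end of the simulated window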
ee-in/python-api
plotly/matplotlylib/mplexporter/tests/test_basic.py
3
6753
from ..exporter import Exporter from ..renderers import FakeRenderer, FullFakeRenderer import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np from numpy.testing import assert_warns def fake_renderer_output(fig, Renderer): renderer = Renderer() exporter = Exporter(renderer) exporter.run(fig) return renderer.output def _assert_output_equal(text1, text2): for line1, line2 in zip(text1.strip().split(), text2.strip().split()): assert line1 == line2 def test_lines(): fig, ax = plt.subplots() ax.plot(range(20), '-k') _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 20 vertices closing axes closing figure """) _assert_output_equal(fake_renderer_output(fig, FullFakeRenderer), """ opening figure opening axes draw line with 20 points closing axes closing figure """) def test_markers(): fig, ax = plt.subplots() ax.plot(range(2), 'ok') _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 25 vertices draw path with 25 vertices closing axes closing figure """) _assert_output_equal(fake_renderer_output(fig, FullFakeRenderer), """ opening figure opening axes draw 2 markers closing axes closing figure """) def test_path_collection(): fig, ax = plt.subplots() ax.scatter(range(3), range(3)) _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 25 vertices draw path with 25 vertices draw path with 25 vertices closing axes closing figure """) _assert_output_equal(fake_renderer_output(fig, FullFakeRenderer), """ opening figure opening axes draw path collection with 3 offsets closing axes closing figure """) def test_text(): fig, ax = plt.subplots() ax.set_xlabel("my x label") ax.set_ylabel("my y label") ax.set_title("my title") ax.text(0.5, 0.5, "my text") _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw text 'my text' None draw text 'my x label' xlabel draw text 'my y label' ylabel draw text 'my title' title closing axes closing figure """) def test_path(): fig, ax = plt.subplots() ax.add_patch(plt.Circle((0, 0), 1)) ax.add_patch(plt.Rectangle((0, 0), 1, 2)) _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 25 vertices draw path with 4 vertices closing axes closing figure """) def test_multiaxes(): fig, ax = plt.subplots(2) ax[0].plot(range(4)) ax[1].plot(range(10)) _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 4 vertices closing axes opening axes draw path with 10 vertices closing axes closing figure """) def test_image(): np.random.seed(0) # image size depends on the seed fig, ax = plt.subplots() ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation='nearest') _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw image of size 2848 closing axes closing figure """) def test_legend(): fig, ax = plt.subplots() ax.plot([1,2,3], label='label') ax.legend().set_visible(False) _assert_output_equal(fake_renderer_output(fig, FakeRenderer), """ opening figure opening axes draw path with 3 vertices opening legend closing legend closing axes closing figure """) def test_legend_dots(): fig, ax = plt.subplots() ax.plot([1,2,3], label='label') ax.plot([2,2,2], 'o', label='dots') ax.legend().set_visible(True) _assert_output_equal(fake_renderer_output(fig, FullFakeRenderer), """ opening figure 
opening axes draw line with 3 points draw 3 markers opening legend draw line with 2 points draw text 'label' None draw 2 markers draw text 'dots' None draw path with 5 vertices closing legend closing axes closing figure """) def test_blended(): fig, ax = plt.subplots() ax.axvline(0) assert_warns(UserWarning, fake_renderer_output, fig, FakeRenderer)
mit
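A standalone variant of the fake_renderer_output helper used throughout these tests; the absolute import paths are inferred from the repository layout above and may differ between installs.

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Assumed absolute paths for the relative imports in the test module.
from plotly.matplotlylib.mplexporter.exporter import Exporter
from plotly.matplotlylib.mplexporter.renderers import FakeRenderer

fig, ax = plt.subplots()
ax.plot(range(5), '-k')

renderer = FakeRenderer()
Exporter(renderer).run(fig)   # walk the figure and feed draw events to the renderer
print(renderer.output)        # textual trace: "opening figure ... draw path ..."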
smblance/ggplot
ggplot/geoms/geom_tile.py
12
3695
from __future__ import (absolute_import, division, print_function, unicode_literals) import pandas as pd import numpy as np from .geom import geom from matplotlib.patches import Rectangle import matplotlib.colors as colors import matplotlib.colorbar as colorbar class geom_tile(geom): DEFAULT_AES = {} REQUIRED_AES = {'x', 'y', 'fill'} DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'} _aes_renames = {} _units = set() def _plot_unit(self, pinfo, ax): x = pinfo.pop('x') y = pinfo.pop('y') fill = pinfo.pop('fill') # TODO: Fix this hack! # Currently, if the fill is specified in the ggplot aes wrapper, ggplot # will assign colors without regard to the fill values. This is okay for # categorical maps but not heatmaps. At this stage in the pipeline the # geom can't recover the original values. # # However, if the fill is specified in the geom_tile aes wrapper, the # original fill values are sent unaltered, so we can make a heat map # with the values. # Was the fill specified in geom wrapper only? (i.e. not in ggplot) if 'fill' in self.aes_unique_to_geom: # Determine if there are non-numeric values. if False in [isinstance(v, (int, long, float, complex)) for v in set(fill)]: # No need to handle this case. Instruct the user to put categorical # values in the ggplot wrapper. raise Exception('For categorical fill values specify fill in the ggplot aes instead of the geom_tile aes.') # All values are numeric so determine fill using colormap. else: fill_min = np.min(fill) fill_max = np.max(fill) if np.isnan(fill_min): raise Exception('Fill values cannot contain NaN values.') fill_rng = float(fill_max - fill_min) fill_vals = (fill - fill_min) / fill_rng cmap = self.gg.colormap(fill_vals.tolist()) fill = [colors.rgb2hex(c) for c in cmap[::, :3]] df = pd.DataFrame( {'x': x, 'y': y, 'fill': fill}).set_index(['x', 'y']).unstack(0) # Setup axes. x_ticks = range(2*len(set(x)) + 1) y_ticks = range(2*len(set(y)) + 1) x_indices = sorted(set(x)) y_indices = sorted(set(y)) # Setup box plotting parameters. x_start = 0 y_start = 0 x_step = 2 y_step = 2 # Plot grid. on_y = y_start for yi in xrange(len(y_indices)): on_x = x_start for xi in xrange(len(x_indices)): color = df.iloc[yi,xi] if not isinstance(color, float): ax.add_patch(Rectangle((on_x, on_y), x_step, y_step, facecolor=color)) on_x += x_step on_y += y_step # Draw the colorbar scale if drawing a heat map. if 'cmap' in locals(): norm = colors.Normalize(vmin = fill_min, vmax = fill_max) cax, kw = colorbar.make_axes(ax) cax.hold(True) colorbar.ColorbarBase(cax, cmap = self.gg.colormap, norm = norm) # Set axis labels and ticks. x_labels = ['']*(len(x_indices)+1) for i,v in enumerate(x_indices): x_labels.insert(2*i+1, v) y_labels = ['']*(len(y_indices)+1) for i,v in enumerate(y_indices): y_labels.insert(2*i+1, v) ax.set_xticklabels(x_labels) ax.set_xticks(x_ticks) ax.set_yticklabels(y_labels) ax.set_yticks(y_ticks)
bsd-2-clause
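The heat-map branch above normalises numeric fill values and pushes them through the plot's colormap; a stand-alone sketch of that mapping follows, with matplotlib's viridis standing in for self.gg.colormap and toy fill values invented for illustration.

import numpy as np
import matplotlib.colors as colors
import matplotlib.pyplot as plt

fill = np.array([0.0, 2.5, 5.0, 7.5, 10.0])                         # toy numeric fill values
fill_vals = (fill - fill.min()) / float(fill.max() - fill.min())    # scale to [0, 1]
rgba = plt.cm.viridis(fill_vals)                                    # map through a colormap
hex_colours = [colors.rgb2hex(c[:3]) for c in rgba]                 # hex strings for the patches
print(hex_colours)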
niknow/scipy
scipy/interpolate/tests/test_rbf.py
45
4626
#!/usr/bin/env python # Created by John Travers, Robert Hetland, 2007 """ Test functions for rbf module """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_, assert_array_almost_equal, assert_almost_equal, run_module_suite) from numpy import linspace, sin, random, exp, allclose from scipy.interpolate.rbf import Rbf FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian', 'cubic', 'quintic', 'thin-plate', 'linear') def check_rbf1d_interpolation(function): # Check that the Rbf function interpolates through the nodes (1D) x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y, function=function) yi = rbf(x) assert_array_almost_equal(y, yi) assert_almost_equal(rbf(float(x[0])), y[0]) def check_rbf2d_interpolation(function): # Check that the Rbf function interpolates through the nodes (2D). x = random.rand(50,1)*4-2 y = random.rand(50,1)*4-2 z = x*exp(-x**2-1j*y**2) rbf = Rbf(x, y, z, epsilon=2, function=function) zi = rbf(x, y) zi.shape = x.shape assert_array_almost_equal(z, zi) def check_rbf3d_interpolation(function): # Check that the Rbf function interpolates through the nodes (3D). x = random.rand(50, 1)*4 - 2 y = random.rand(50, 1)*4 - 2 z = random.rand(50, 1)*4 - 2 d = x*exp(-x**2 - y**2) rbf = Rbf(x, y, z, d, epsilon=2, function=function) di = rbf(x, y, z) di.shape = x.shape assert_array_almost_equal(di, d) def test_rbf_interpolation(): for function in FUNCTIONS: yield check_rbf1d_interpolation, function yield check_rbf2d_interpolation, function yield check_rbf3d_interpolation, function def check_rbf1d_regularity(function, atol): # Check that the Rbf function approximates a smooth function well away # from the nodes. x = linspace(0, 10, 9) y = sin(x) rbf = Rbf(x, y, function=function) xi = linspace(0, 10, 100) yi = rbf(xi) # import matplotlib.pyplot as plt # plt.figure() # plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-') # plt.plot(x, y, 'o', xi, yi-sin(xi), ':') # plt.title(function) # plt.show() msg = "abs-diff: %f" % abs(yi - sin(xi)).max() assert_(allclose(yi, sin(xi), atol=atol), msg) def test_rbf_regularity(): tolerances = { 'multiquadric': 0.1, 'inverse multiquadric': 0.15, 'gaussian': 0.15, 'cubic': 0.15, 'quintic': 0.1, 'thin-plate': 0.1, 'linear': 0.2 } for function in FUNCTIONS: yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2) def check_rbf1d_stability(function): # Check that the Rbf function with default epsilon is not subject # to overshoot. Regression for issue #4523. # # Generate some data (fixed random seed hence deterministic) np.random.seed(1234) x = np.linspace(0, 10, 50) z = x + 4.0 * np.random.randn(len(x)) rbf = Rbf(x, z, function=function) xi = np.linspace(0, 10, 1000) yi = rbf(xi) # subtract the linear trend and make sure there no spikes assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1) def test_rbf_stability(): for function in FUNCTIONS: yield check_rbf1d_stability, function def test_default_construction(): # Check that the Rbf class can be constructed with the default # multiquadric basis function. Regression test for ticket #1228. x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y) yi = rbf(x) assert_array_almost_equal(y, yi) def test_function_is_callable(): # Check that the Rbf class can be constructed with function=callable. 
x = linspace(0,10,9) y = sin(x) linfunc = lambda x:x rbf = Rbf(x, y, function=linfunc) yi = rbf(x) assert_array_almost_equal(y, yi) def test_two_arg_function_is_callable(): # Check that the Rbf class can be constructed with a two argument # function=callable. def _func(self, r): return self.epsilon + r x = linspace(0,10,9) y = sin(x) rbf = Rbf(x, y, function=_func) yi = rbf(x) assert_array_almost_equal(y, yi) def test_rbf_epsilon_none(): x = linspace(0, 10, 9) y = sin(x) rbf = Rbf(x, y, epsilon=None) def test_rbf_epsilon_none_collinear(): # Check that collinear points in one dimension doesn't cause an error # due to epsilon = 0 x = [1, 2, 3] y = [4, 4, 4] z = [5, 6, 7] rbf = Rbf(x, y, z, epsilon=None) assert_(rbf.epsilon > 0) if __name__ == "__main__": run_module_suite()
bsd-3-clause
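The test module above exercises scipy.interpolate.Rbf in one to three dimensions, including default construction. A quick usage sketch of the interface those tests rely on; the sample data is arbitrary:

import numpy as np
from scipy.interpolate import Rbf

# Interpolate samples of sin(x) with the default multiquadric basis function.
x = np.linspace(0, 10, 9)
y = np.sin(x)
rbf = Rbf(x, y)  # same default construction the tests cover

# The interpolant passes through the nodes and can be evaluated on a finer grid.
xi = np.linspace(0, 10, 101)
yi = rbf(xi)
assert np.allclose(rbf(x), y, atol=1e-6)
print(float(np.abs(yi - np.sin(xi)).max()))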
louisLouL/pair_trading
capstone_env/lib/python3.6/site-packages/pandas/tests/io/sas/test_sas7bdat.py
3
6098
import pandas as pd from pandas.compat import PY2 import pandas.util.testing as tm import os import io import pytest import numpy as np class TestSAS7BDAT(object): def setup_method(self, method): self.dirpath = tm.get_data_path() self.data = [] self.test_ix = [list(range(1, 16)), [16]] for j in 1, 2: fname = os.path.join(self.dirpath, "test_sas7bdat_%d.csv" % j) df = pd.read_csv(fname) epoch = pd.datetime(1960, 1, 1) t1 = pd.to_timedelta(df["Column4"], unit='d') df["Column4"] = epoch + t1 t2 = pd.to_timedelta(df["Column12"], unit='d') df["Column12"] = epoch + t2 for k in range(df.shape[1]): col = df.iloc[:, k] if col.dtype == np.int64: df.iloc[:, k] = df.iloc[:, k].astype(np.float64) elif col.dtype == np.dtype('O'): if PY2: f = lambda x: (x.decode('utf-8') if isinstance(x, str) else x) df.iloc[:, k] = df.iloc[:, k].apply(f) self.data.append(df) def test_from_file(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) def test_from_buffer(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k) with open(fname, 'rb') as f: byts = f.read() buf = io.BytesIO(byts) rdr = pd.read_sas(buf, format="sas7bdat", iterator=True, encoding='utf-8') df = rdr.read() tm.assert_frame_equal(df, df0, check_exact=False) rdr.close() def test_from_iterator(self): for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k) rdr = pd.read_sas(fname, iterator=True, encoding='utf-8') df = rdr.read(2) tm.assert_frame_equal(df, df0.iloc[0:2, :]) df = rdr.read(3) tm.assert_frame_equal(df, df0.iloc[2:5, :]) rdr.close() @pytest.mark.xfail(reason="read_sas currently doesn't work with pathlib") def test_path_pathlib(self): tm._skip_if_no_pathlib() from pathlib import Path for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = Path(os.path.join(self.dirpath, "test%d.sas7bdat" % k)) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) @pytest.mark.xfail(reason="read_sas currently doesn't work with localpath") def test_path_localpath(self): tm._skip_if_no_localpath() from py.path import local as LocalPath for j in 0, 1: df0 = self.data[j] for k in self.test_ix[j]: fname = LocalPath(os.path.join(self.dirpath, "test%d.sas7bdat" % k)) df = pd.read_sas(fname, encoding='utf-8') tm.assert_frame_equal(df, df0) def test_iterator_loop(self): # github #13654 for j in 0, 1: for k in self.test_ix[j]: for chunksize in 3, 5, 10, 11: fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k) rdr = pd.read_sas(fname, chunksize=10, encoding='utf-8') y = 0 for x in rdr: y += x.shape[0] assert y == rdr.row_count rdr.close() def test_iterator_read_too_much(self): # github #14734 k = self.test_ix[0][0] fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k) rdr = pd.read_sas(fname, format="sas7bdat", iterator=True, encoding='utf-8') d1 = rdr.read(rdr.row_count + 20) rdr.close() rdr = pd.read_sas(fname, iterator=True, encoding="utf-8") d2 = rdr.read(rdr.row_count + 20) tm.assert_frame_equal(d1, d2) rdr.close() def test_encoding_options(): dirpath = tm.get_data_path() fname = os.path.join(dirpath, "test1.sas7bdat") df1 = pd.read_sas(fname) df2 = pd.read_sas(fname, encoding='utf-8') for col in df1.columns: try: df1[col] = df1[col].str.decode('utf-8') except AttributeError: pass tm.assert_frame_equal(df1, df2) from pandas.io.sas.sas7bdat import 
SAS7BDATReader rdr = SAS7BDATReader(fname, convert_header_text=False) df3 = rdr.read() rdr.close() for x, y in zip(df1.columns, df3.columns): assert(x == y.decode()) def test_productsales(): dirpath = tm.get_data_path() fname = os.path.join(dirpath, "productsales.sas7bdat") df = pd.read_sas(fname, encoding='utf-8') fname = os.path.join(dirpath, "productsales.csv") df0 = pd.read_csv(fname) vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR", "MONTH"] df0[vn] = df0[vn].astype(np.float64) tm.assert_frame_equal(df, df0) def test_12659(): dirpath = tm.get_data_path() fname = os.path.join(dirpath, "test_12659.sas7bdat") df = pd.read_sas(fname) fname = os.path.join(dirpath, "test_12659.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0) def test_airline(): dirpath = tm.get_data_path() fname = os.path.join(dirpath, "airline.sas7bdat") df = pd.read_sas(fname) fname = os.path.join(dirpath, "airline.csv") df0 = pd.read_csv(fname) df0 = df0.astype(np.float64) tm.assert_frame_equal(df, df0, check_exact=False)
mit
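The sas7bdat tests above read fixtures with pandas.read_sas both in one shot and through the iterator/chunksize interface. A minimal sketch of the chunked pattern they verify; the file name here is a placeholder, not one of the test fixtures:

import pandas as pd

# Placeholder path; substitute a real .sas7bdat file.
path = "example.sas7bdat"

# Stream the file in chunks instead of loading it all at once.
reader = pd.read_sas(path, format="sas7bdat", chunksize=10, encoding="utf-8")
rows = 0
for chunk in reader:
    rows += chunk.shape[0]
reader.close()
print(rows)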
qiuminxu/tensorboard
tensorboard/plugins/beholder/colormaps.py
3
39692
# The TensorFlow Authors and Google LLC release this file under the # Creative Commons CC0 (public domain dedication) in respect of the # wishes of its original authors. # # Source: https://github.com/BIDS/colormap/blob/84cb377/colormaps.py # Modifications: Represent floats as uint8 ndarray constants at runtime. # # ---------------------------------------------------------------------- # # New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt, # and (in the case of viridis) Eric Firing. # # This file and the colormaps in it are released under the CC0 license / # public domain dedication. We would appreciate credit if you use or # redistribute these colormaps, but do not impose any legal # restrictions. # # To the extent possible under law, the persons who associated CC0 with # mpl-colormaps have waived all copyright and related or neighboring # rights to mpl-colormaps. # # You should have received a copy of the CC0 legalcode along with this # work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. """Perceptually Uniform Sequential Colormaps. Each colormap constant is represented as ndarray of DT_UINT8 RGB values: - magma - inferno - plasma - viridis These colormaps were designed by: - Nathaniel J. Smith - Stefan van der Walt - Eric Firing (for viridis) See also: <https://matplotlib.org/examples/color/colormaps_reference.html> """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def _convert(colormap_data): colormap = (np.array(colormap_data) * 255).astype(np.uint8) colormap.setflags(write=False) return colormap magma = _convert( ((0.001462, 0.000466, 0.013866), (0.002258, 0.001295, 0.018331), (0.003279, 0.002305, 0.023708), (0.004512, 0.003490, 0.029965), (0.005950, 0.004843, 0.037130), (0.007588, 0.006356, 0.044973), (0.009426, 0.008022, 0.052844), (0.011465, 0.009828, 0.060750), (0.013708, 0.011771, 0.068667), (0.016156, 0.013840, 0.076603), (0.018815, 0.016026, 0.084584), (0.021692, 0.018320, 0.092610), (0.024792, 0.020715, 0.100676), (0.028123, 0.023201, 0.108787), (0.031696, 0.025765, 0.116965), (0.035520, 0.028397, 0.125209), (0.039608, 0.031090, 0.133515), (0.043830, 0.033830, 0.141886), (0.048062, 0.036607, 0.150327), (0.052320, 0.039407, 0.158841), (0.056615, 0.042160, 0.167446), (0.060949, 0.044794, 0.176129), (0.065330, 0.047318, 0.184892), (0.069764, 0.049726, 0.193735), (0.074257, 0.052017, 0.202660), (0.078815, 0.054184, 0.211667), (0.083446, 0.056225, 0.220755), (0.088155, 0.058133, 0.229922), (0.092949, 0.059904, 0.239164), (0.097833, 0.061531, 0.248477), (0.102815, 0.063010, 0.257854), (0.107899, 0.064335, 0.267289), (0.113094, 0.065492, 0.276784), (0.118405, 0.066479, 0.286321), (0.123833, 0.067295, 0.295879), (0.129380, 0.067935, 0.305443), (0.135053, 0.068391, 0.315000), (0.140858, 0.068654, 0.324538), (0.146785, 0.068738, 0.334011), (0.152839, 0.068637, 0.343404), (0.159018, 0.068354, 0.352688), (0.165308, 0.067911, 0.361816), (0.171713, 0.067305, 0.370771), (0.178212, 0.066576, 0.379497), (0.184801, 0.065732, 0.387973), (0.191460, 0.064818, 0.396152), (0.198177, 0.063862, 0.404009), (0.204935, 0.062907, 0.411514), (0.211718, 0.061992, 0.418647), (0.218512, 0.061158, 0.425392), (0.225302, 0.060445, 0.431742), (0.232077, 0.059889, 0.437695), (0.238826, 0.059517, 0.443256), (0.245543, 0.059352, 0.448436), (0.252220, 0.059415, 0.453248), (0.258857, 0.059706, 0.457710), (0.265447, 0.060237, 0.461840), (0.271994, 0.060994, 0.465660), (0.278493, 0.061978, 0.469190), 
(0.284951, 0.063168, 0.472451), (0.291366, 0.064553, 0.475462), (0.297740, 0.066117, 0.478243), (0.304081, 0.067835, 0.480812), (0.310382, 0.069702, 0.483186), (0.316654, 0.071690, 0.485380), (0.322899, 0.073782, 0.487408), (0.329114, 0.075972, 0.489287), (0.335308, 0.078236, 0.491024), (0.341482, 0.080564, 0.492631), (0.347636, 0.082946, 0.494121), (0.353773, 0.085373, 0.495501), (0.359898, 0.087831, 0.496778), (0.366012, 0.090314, 0.497960), (0.372116, 0.092816, 0.499053), (0.378211, 0.095332, 0.500067), (0.384299, 0.097855, 0.501002), (0.390384, 0.100379, 0.501864), (0.396467, 0.102902, 0.502658), (0.402548, 0.105420, 0.503386), (0.408629, 0.107930, 0.504052), (0.414709, 0.110431, 0.504662), (0.420791, 0.112920, 0.505215), (0.426877, 0.115395, 0.505714), (0.432967, 0.117855, 0.506160), (0.439062, 0.120298, 0.506555), (0.445163, 0.122724, 0.506901), (0.451271, 0.125132, 0.507198), (0.457386, 0.127522, 0.507448), (0.463508, 0.129893, 0.507652), (0.469640, 0.132245, 0.507809), (0.475780, 0.134577, 0.507921), (0.481929, 0.136891, 0.507989), (0.488088, 0.139186, 0.508011), (0.494258, 0.141462, 0.507988), (0.500438, 0.143719, 0.507920), (0.506629, 0.145958, 0.507806), (0.512831, 0.148179, 0.507648), (0.519045, 0.150383, 0.507443), (0.525270, 0.152569, 0.507192), (0.531507, 0.154739, 0.506895), (0.537755, 0.156894, 0.506551), (0.544015, 0.159033, 0.506159), (0.550287, 0.161158, 0.505719), (0.556571, 0.163269, 0.505230), (0.562866, 0.165368, 0.504692), (0.569172, 0.167454, 0.504105), (0.575490, 0.169530, 0.503466), (0.581819, 0.171596, 0.502777), (0.588158, 0.173652, 0.502035), (0.594508, 0.175701, 0.501241), (0.600868, 0.177743, 0.500394), (0.607238, 0.179779, 0.499492), (0.613617, 0.181811, 0.498536), (0.620005, 0.183840, 0.497524), (0.626401, 0.185867, 0.496456), (0.632805, 0.187893, 0.495332), (0.639216, 0.189921, 0.494150), (0.645633, 0.191952, 0.492910), (0.652056, 0.193986, 0.491611), (0.658483, 0.196027, 0.490253), (0.664915, 0.198075, 0.488836), (0.671349, 0.200133, 0.487358), (0.677786, 0.202203, 0.485819), (0.684224, 0.204286, 0.484219), (0.690661, 0.206384, 0.482558), (0.697098, 0.208501, 0.480835), (0.703532, 0.210638, 0.479049), (0.709962, 0.212797, 0.477201), (0.716387, 0.214982, 0.475290), (0.722805, 0.217194, 0.473316), (0.729216, 0.219437, 0.471279), (0.735616, 0.221713, 0.469180), (0.742004, 0.224025, 0.467018), (0.748378, 0.226377, 0.464794), (0.754737, 0.228772, 0.462509), (0.761077, 0.231214, 0.460162), (0.767398, 0.233705, 0.457755), (0.773695, 0.236249, 0.455289), (0.779968, 0.238851, 0.452765), (0.786212, 0.241514, 0.450184), (0.792427, 0.244242, 0.447543), (0.798608, 0.247040, 0.444848), (0.804752, 0.249911, 0.442102), (0.810855, 0.252861, 0.439305), (0.816914, 0.255895, 0.436461), (0.822926, 0.259016, 0.433573), (0.828886, 0.262229, 0.430644), (0.834791, 0.265540, 0.427671), (0.840636, 0.268953, 0.424666), (0.846416, 0.272473, 0.421631), (0.852126, 0.276106, 0.418573), (0.857763, 0.279857, 0.415496), (0.863320, 0.283729, 0.412403), (0.868793, 0.287728, 0.409303), (0.874176, 0.291859, 0.406205), (0.879464, 0.296125, 0.403118), (0.884651, 0.300530, 0.400047), (0.889731, 0.305079, 0.397002), (0.894700, 0.309773, 0.393995), (0.899552, 0.314616, 0.391037), (0.904281, 0.319610, 0.388137), (0.908884, 0.324755, 0.385308), (0.913354, 0.330052, 0.382563), (0.917689, 0.335500, 0.379915), (0.921884, 0.341098, 0.377376), (0.925937, 0.346844, 0.374959), (0.929845, 0.352734, 0.372677), (0.933606, 0.358764, 0.370541), (0.937221, 0.364929, 0.368567), (0.940687, 0.371224, 0.366762), 
(0.944006, 0.377643, 0.365136), (0.947180, 0.384178, 0.363701), (0.950210, 0.390820, 0.362468), (0.953099, 0.397563, 0.361438), (0.955849, 0.404400, 0.360619), (0.958464, 0.411324, 0.360014), (0.960949, 0.418323, 0.359630), (0.963310, 0.425390, 0.359469), (0.965549, 0.432519, 0.359529), (0.967671, 0.439703, 0.359810), (0.969680, 0.446936, 0.360311), (0.971582, 0.454210, 0.361030), (0.973381, 0.461520, 0.361965), (0.975082, 0.468861, 0.363111), (0.976690, 0.476226, 0.364466), (0.978210, 0.483612, 0.366025), (0.979645, 0.491014, 0.367783), (0.981000, 0.498428, 0.369734), (0.982279, 0.505851, 0.371874), (0.983485, 0.513280, 0.374198), (0.984622, 0.520713, 0.376698), (0.985693, 0.528148, 0.379371), (0.986700, 0.535582, 0.382210), (0.987646, 0.543015, 0.385210), (0.988533, 0.550446, 0.388365), (0.989363, 0.557873, 0.391671), (0.990138, 0.565296, 0.395122), (0.990871, 0.572706, 0.398714), (0.991558, 0.580107, 0.402441), (0.992196, 0.587502, 0.406299), (0.992785, 0.594891, 0.410283), (0.993326, 0.602275, 0.414390), (0.993834, 0.609644, 0.418613), (0.994309, 0.616999, 0.422950), (0.994738, 0.624350, 0.427397), (0.995122, 0.631696, 0.431951), (0.995480, 0.639027, 0.436607), (0.995810, 0.646344, 0.441361), (0.996096, 0.653659, 0.446213), (0.996341, 0.660969, 0.451160), (0.996580, 0.668256, 0.456192), (0.996775, 0.675541, 0.461314), (0.996925, 0.682828, 0.466526), (0.997077, 0.690088, 0.471811), (0.997186, 0.697349, 0.477182), (0.997254, 0.704611, 0.482635), (0.997325, 0.711848, 0.488154), (0.997351, 0.719089, 0.493755), (0.997351, 0.726324, 0.499428), (0.997341, 0.733545, 0.505167), (0.997285, 0.740772, 0.510983), (0.997228, 0.747981, 0.516859), (0.997138, 0.755190, 0.522806), (0.997019, 0.762398, 0.528821), (0.996898, 0.769591, 0.534892), (0.996727, 0.776795, 0.541039), (0.996571, 0.783977, 0.547233), (0.996369, 0.791167, 0.553499), (0.996162, 0.798348, 0.559820), (0.995932, 0.805527, 0.566202), (0.995680, 0.812706, 0.572645), (0.995424, 0.819875, 0.579140), (0.995131, 0.827052, 0.585701), (0.994851, 0.834213, 0.592307), (0.994524, 0.841387, 0.598983), (0.994222, 0.848540, 0.605696), (0.993866, 0.855711, 0.612482), (0.993545, 0.862859, 0.619299), (0.993170, 0.870024, 0.626189), (0.992831, 0.877168, 0.633109), (0.992440, 0.884330, 0.640099), (0.992089, 0.891470, 0.647116), (0.991688, 0.898627, 0.654202), (0.991332, 0.905763, 0.661309), (0.990930, 0.912915, 0.668481), (0.990570, 0.920049, 0.675675), (0.990175, 0.927196, 0.682926), (0.989815, 0.934329, 0.690198), (0.989434, 0.941470, 0.697519), (0.989077, 0.948604, 0.704863), (0.988717, 0.955742, 0.712242), (0.988367, 0.962878, 0.719649), (0.988033, 0.970012, 0.727077), (0.987691, 0.977154, 0.734536), (0.987387, 0.984288, 0.742002), (0.987053, 0.991438, 0.749504))) inferno = _convert( ((0.001462, 0.000466, 0.013866), (0.002267, 0.001270, 0.018570), (0.003299, 0.002249, 0.024239), (0.004547, 0.003392, 0.030909), (0.006006, 0.004692, 0.038558), (0.007676, 0.006136, 0.046836), (0.009561, 0.007713, 0.055143), (0.011663, 0.009417, 0.063460), (0.013995, 0.011225, 0.071862), (0.016561, 0.013136, 0.080282), (0.019373, 0.015133, 0.088767), (0.022447, 0.017199, 0.097327), (0.025793, 0.019331, 0.105930), (0.029432, 0.021503, 0.114621), (0.033385, 0.023702, 0.123397), (0.037668, 0.025921, 0.132232), (0.042253, 0.028139, 0.141141), (0.046915, 0.030324, 0.150164), (0.051644, 0.032474, 0.159254), (0.056449, 0.034569, 0.168414), (0.061340, 0.036590, 0.177642), (0.066331, 0.038504, 0.186962), (0.071429, 0.040294, 0.196354), (0.076637, 0.041905, 0.205799), (0.081962, 
0.043328, 0.215289), (0.087411, 0.044556, 0.224813), (0.092990, 0.045583, 0.234358), (0.098702, 0.046402, 0.243904), (0.104551, 0.047008, 0.253430), (0.110536, 0.047399, 0.262912), (0.116656, 0.047574, 0.272321), (0.122908, 0.047536, 0.281624), (0.129285, 0.047293, 0.290788), (0.135778, 0.046856, 0.299776), (0.142378, 0.046242, 0.308553), (0.149073, 0.045468, 0.317085), (0.155850, 0.044559, 0.325338), (0.162689, 0.043554, 0.333277), (0.169575, 0.042489, 0.340874), (0.176493, 0.041402, 0.348111), (0.183429, 0.040329, 0.354971), (0.190367, 0.039309, 0.361447), (0.197297, 0.038400, 0.367535), (0.204209, 0.037632, 0.373238), (0.211095, 0.037030, 0.378563), (0.217949, 0.036615, 0.383522), (0.224763, 0.036405, 0.388129), (0.231538, 0.036405, 0.392400), (0.238273, 0.036621, 0.396353), (0.244967, 0.037055, 0.400007), (0.251620, 0.037705, 0.403378), (0.258234, 0.038571, 0.406485), (0.264810, 0.039647, 0.409345), (0.271347, 0.040922, 0.411976), (0.277850, 0.042353, 0.414392), (0.284321, 0.043933, 0.416608), (0.290763, 0.045644, 0.418637), (0.297178, 0.047470, 0.420491), (0.303568, 0.049396, 0.422182), (0.309935, 0.051407, 0.423721), (0.316282, 0.053490, 0.425116), (0.322610, 0.055634, 0.426377), (0.328921, 0.057827, 0.427511), (0.335217, 0.060060, 0.428524), (0.341500, 0.062325, 0.429425), (0.347771, 0.064616, 0.430217), (0.354032, 0.066925, 0.430906), (0.360284, 0.069247, 0.431497), (0.366529, 0.071579, 0.431994), (0.372768, 0.073915, 0.432400), (0.379001, 0.076253, 0.432719), (0.385228, 0.078591, 0.432955), (0.391453, 0.080927, 0.433109), (0.397674, 0.083257, 0.433183), (0.403894, 0.085580, 0.433179), (0.410113, 0.087896, 0.433098), (0.416331, 0.090203, 0.432943), (0.422549, 0.092501, 0.432714), (0.428768, 0.094790, 0.432412), (0.434987, 0.097069, 0.432039), (0.441207, 0.099338, 0.431594), (0.447428, 0.101597, 0.431080), (0.453651, 0.103848, 0.430498), (0.459875, 0.106089, 0.429846), (0.466100, 0.108322, 0.429125), (0.472328, 0.110547, 0.428334), (0.478558, 0.112764, 0.427475), (0.484789, 0.114974, 0.426548), (0.491022, 0.117179, 0.425552), (0.497257, 0.119379, 0.424488), (0.503493, 0.121575, 0.423356), (0.509730, 0.123769, 0.422156), (0.515967, 0.125960, 0.420887), (0.522206, 0.128150, 0.419549), (0.528444, 0.130341, 0.418142), (0.534683, 0.132534, 0.416667), (0.540920, 0.134729, 0.415123), (0.547157, 0.136929, 0.413511), (0.553392, 0.139134, 0.411829), (0.559624, 0.141346, 0.410078), (0.565854, 0.143567, 0.408258), (0.572081, 0.145797, 0.406369), (0.578304, 0.148039, 0.404411), (0.584521, 0.150294, 0.402385), (0.590734, 0.152563, 0.400290), (0.596940, 0.154848, 0.398125), (0.603139, 0.157151, 0.395891), (0.609330, 0.159474, 0.393589), (0.615513, 0.161817, 0.391219), (0.621685, 0.164184, 0.388781), (0.627847, 0.166575, 0.386276), (0.633998, 0.168992, 0.383704), (0.640135, 0.171438, 0.381065), (0.646260, 0.173914, 0.378359), (0.652369, 0.176421, 0.375586), (0.658463, 0.178962, 0.372748), (0.664540, 0.181539, 0.369846), (0.670599, 0.184153, 0.366879), (0.676638, 0.186807, 0.363849), (0.682656, 0.189501, 0.360757), (0.688653, 0.192239, 0.357603), (0.694627, 0.195021, 0.354388), (0.700576, 0.197851, 0.351113), (0.706500, 0.200728, 0.347777), (0.712396, 0.203656, 0.344383), (0.718264, 0.206636, 0.340931), (0.724103, 0.209670, 0.337424), (0.729909, 0.212759, 0.333861), (0.735683, 0.215906, 0.330245), (0.741423, 0.219112, 0.326576), (0.747127, 0.222378, 0.322856), (0.752794, 0.225706, 0.319085), (0.758422, 0.229097, 0.315266), (0.764010, 0.232554, 0.311399), (0.769556, 0.236077, 0.307485), (0.775059, 
0.239667, 0.303526), (0.780517, 0.243327, 0.299523), (0.785929, 0.247056, 0.295477), (0.791293, 0.250856, 0.291390), (0.796607, 0.254728, 0.287264), (0.801871, 0.258674, 0.283099), (0.807082, 0.262692, 0.278898), (0.812239, 0.266786, 0.274661), (0.817341, 0.270954, 0.270390), (0.822386, 0.275197, 0.266085), (0.827372, 0.279517, 0.261750), (0.832299, 0.283913, 0.257383), (0.837165, 0.288385, 0.252988), (0.841969, 0.292933, 0.248564), (0.846709, 0.297559, 0.244113), (0.851384, 0.302260, 0.239636), (0.855992, 0.307038, 0.235133), (0.860533, 0.311892, 0.230606), (0.865006, 0.316822, 0.226055), (0.869409, 0.321827, 0.221482), (0.873741, 0.326906, 0.216886), (0.878001, 0.332060, 0.212268), (0.882188, 0.337287, 0.207628), (0.886302, 0.342586, 0.202968), (0.890341, 0.347957, 0.198286), (0.894305, 0.353399, 0.193584), (0.898192, 0.358911, 0.188860), (0.902003, 0.364492, 0.184116), (0.905735, 0.370140, 0.179350), (0.909390, 0.375856, 0.174563), (0.912966, 0.381636, 0.169755), (0.916462, 0.387481, 0.164924), (0.919879, 0.393389, 0.160070), (0.923215, 0.399359, 0.155193), (0.926470, 0.405389, 0.150292), (0.929644, 0.411479, 0.145367), (0.932737, 0.417627, 0.140417), (0.935747, 0.423831, 0.135440), (0.938675, 0.430091, 0.130438), (0.941521, 0.436405, 0.125409), (0.944285, 0.442772, 0.120354), (0.946965, 0.449191, 0.115272), (0.949562, 0.455660, 0.110164), (0.952075, 0.462178, 0.105031), (0.954506, 0.468744, 0.099874), (0.956852, 0.475356, 0.094695), (0.959114, 0.482014, 0.089499), (0.961293, 0.488716, 0.084289), (0.963387, 0.495462, 0.079073), (0.965397, 0.502249, 0.073859), (0.967322, 0.509078, 0.068659), (0.969163, 0.515946, 0.063488), (0.970919, 0.522853, 0.058367), (0.972590, 0.529798, 0.053324), (0.974176, 0.536780, 0.048392), (0.975677, 0.543798, 0.043618), (0.977092, 0.550850, 0.039050), (0.978422, 0.557937, 0.034931), (0.979666, 0.565057, 0.031409), (0.980824, 0.572209, 0.028508), (0.981895, 0.579392, 0.026250), (0.982881, 0.586606, 0.024661), (0.983779, 0.593849, 0.023770), (0.984591, 0.601122, 0.023606), (0.985315, 0.608422, 0.024202), (0.985952, 0.615750, 0.025592), (0.986502, 0.623105, 0.027814), (0.986964, 0.630485, 0.030908), (0.987337, 0.637890, 0.034916), (0.987622, 0.645320, 0.039886), (0.987819, 0.652773, 0.045581), (0.987926, 0.660250, 0.051750), (0.987945, 0.667748, 0.058329), (0.987874, 0.675267, 0.065257), (0.987714, 0.682807, 0.072489), (0.987464, 0.690366, 0.079990), (0.987124, 0.697944, 0.087731), (0.986694, 0.705540, 0.095694), (0.986175, 0.713153, 0.103863), (0.985566, 0.720782, 0.112229), (0.984865, 0.728427, 0.120785), (0.984075, 0.736087, 0.129527), (0.983196, 0.743758, 0.138453), (0.982228, 0.751442, 0.147565), (0.981173, 0.759135, 0.156863), (0.980032, 0.766837, 0.166353), (0.978806, 0.774545, 0.176037), (0.977497, 0.782258, 0.185923), (0.976108, 0.789974, 0.196018), (0.974638, 0.797692, 0.206332), (0.973088, 0.805409, 0.216877), (0.971468, 0.813122, 0.227658), (0.969783, 0.820825, 0.238686), (0.968041, 0.828515, 0.249972), (0.966243, 0.836191, 0.261534), (0.964394, 0.843848, 0.273391), (0.962517, 0.851476, 0.285546), (0.960626, 0.859069, 0.298010), (0.958720, 0.866624, 0.310820), (0.956834, 0.874129, 0.323974), (0.954997, 0.881569, 0.337475), (0.953215, 0.888942, 0.351369), (0.951546, 0.896226, 0.365627), (0.950018, 0.903409, 0.380271), (0.948683, 0.910473, 0.395289), (0.947594, 0.917399, 0.410665), (0.946809, 0.924168, 0.426373), (0.946392, 0.930761, 0.442367), (0.946403, 0.937159, 0.458592), (0.946903, 0.943348, 0.474970), (0.947937, 0.949318, 0.491426), (0.949545, 
0.955063, 0.507860), (0.951740, 0.960587, 0.524203), (0.954529, 0.965896, 0.540361), (0.957896, 0.971003, 0.556275), (0.961812, 0.975924, 0.571925), (0.966249, 0.980678, 0.587206), (0.971162, 0.985282, 0.602154), (0.976511, 0.989753, 0.616760), (0.982257, 0.994109, 0.631017), (0.988362, 0.998364, 0.644924))) plasma = _convert( ((0.050383, 0.029803, 0.527975), (0.063536, 0.028426, 0.533124), (0.075353, 0.027206, 0.538007), (0.086222, 0.026125, 0.542658), (0.096379, 0.025165, 0.547103), (0.105980, 0.024309, 0.551368), (0.115124, 0.023556, 0.555468), (0.123903, 0.022878, 0.559423), (0.132381, 0.022258, 0.563250), (0.140603, 0.021687, 0.566959), (0.148607, 0.021154, 0.570562), (0.156421, 0.020651, 0.574065), (0.164070, 0.020171, 0.577478), (0.171574, 0.019706, 0.580806), (0.178950, 0.019252, 0.584054), (0.186213, 0.018803, 0.587228), (0.193374, 0.018354, 0.590330), (0.200445, 0.017902, 0.593364), (0.207435, 0.017442, 0.596333), (0.214350, 0.016973, 0.599239), (0.221197, 0.016497, 0.602083), (0.227983, 0.016007, 0.604867), (0.234715, 0.015502, 0.607592), (0.241396, 0.014979, 0.610259), (0.248032, 0.014439, 0.612868), (0.254627, 0.013882, 0.615419), (0.261183, 0.013308, 0.617911), (0.267703, 0.012716, 0.620346), (0.274191, 0.012109, 0.622722), (0.280648, 0.011488, 0.625038), (0.287076, 0.010855, 0.627295), (0.293478, 0.010213, 0.629490), (0.299855, 0.009561, 0.631624), (0.306210, 0.008902, 0.633694), (0.312543, 0.008239, 0.635700), (0.318856, 0.007576, 0.637640), (0.325150, 0.006915, 0.639512), (0.331426, 0.006261, 0.641316), (0.337683, 0.005618, 0.643049), (0.343925, 0.004991, 0.644710), (0.350150, 0.004382, 0.646298), (0.356359, 0.003798, 0.647810), (0.362553, 0.003243, 0.649245), (0.368733, 0.002724, 0.650601), (0.374897, 0.002245, 0.651876), (0.381047, 0.001814, 0.653068), (0.387183, 0.001434, 0.654177), (0.393304, 0.001114, 0.655199), (0.399411, 0.000859, 0.656133), (0.405503, 0.000678, 0.656977), (0.411580, 0.000577, 0.657730), (0.417642, 0.000564, 0.658390), (0.423689, 0.000646, 0.658956), (0.429719, 0.000831, 0.659425), (0.435734, 0.001127, 0.659797), (0.441732, 0.001540, 0.660069), (0.447714, 0.002080, 0.660240), (0.453677, 0.002755, 0.660310), (0.459623, 0.003574, 0.660277), (0.465550, 0.004545, 0.660139), (0.471457, 0.005678, 0.659897), (0.477344, 0.006980, 0.659549), (0.483210, 0.008460, 0.659095), (0.489055, 0.010127, 0.658534), (0.494877, 0.011990, 0.657865), (0.500678, 0.014055, 0.657088), (0.506454, 0.016333, 0.656202), (0.512206, 0.018833, 0.655209), (0.517933, 0.021563, 0.654109), (0.523633, 0.024532, 0.652901), (0.529306, 0.027747, 0.651586), (0.534952, 0.031217, 0.650165), (0.540570, 0.034950, 0.648640), (0.546157, 0.038954, 0.647010), (0.551715, 0.043136, 0.645277), (0.557243, 0.047331, 0.643443), (0.562738, 0.051545, 0.641509), (0.568201, 0.055778, 0.639477), (0.573632, 0.060028, 0.637349), (0.579029, 0.064296, 0.635126), (0.584391, 0.068579, 0.632812), (0.589719, 0.072878, 0.630408), (0.595011, 0.077190, 0.627917), (0.600266, 0.081516, 0.625342), (0.605485, 0.085854, 0.622686), (0.610667, 0.090204, 0.619951), (0.615812, 0.094564, 0.617140), (0.620919, 0.098934, 0.614257), (0.625987, 0.103312, 0.611305), (0.631017, 0.107699, 0.608287), (0.636008, 0.112092, 0.605205), (0.640959, 0.116492, 0.602065), (0.645872, 0.120898, 0.598867), (0.650746, 0.125309, 0.595617), (0.655580, 0.129725, 0.592317), (0.660374, 0.134144, 0.588971), (0.665129, 0.138566, 0.585582), (0.669845, 0.142992, 0.582154), (0.674522, 0.147419, 0.578688), (0.679160, 0.151848, 0.575189), (0.683758, 0.156278, 
0.571660), (0.688318, 0.160709, 0.568103), (0.692840, 0.165141, 0.564522), (0.697324, 0.169573, 0.560919), (0.701769, 0.174005, 0.557296), (0.706178, 0.178437, 0.553657), (0.710549, 0.182868, 0.550004), (0.714883, 0.187299, 0.546338), (0.719181, 0.191729, 0.542663), (0.723444, 0.196158, 0.538981), (0.727670, 0.200586, 0.535293), (0.731862, 0.205013, 0.531601), (0.736019, 0.209439, 0.527908), (0.740143, 0.213864, 0.524216), (0.744232, 0.218288, 0.520524), (0.748289, 0.222711, 0.516834), (0.752312, 0.227133, 0.513149), (0.756304, 0.231555, 0.509468), (0.760264, 0.235976, 0.505794), (0.764193, 0.240396, 0.502126), (0.768090, 0.244817, 0.498465), (0.771958, 0.249237, 0.494813), (0.775796, 0.253658, 0.491171), (0.779604, 0.258078, 0.487539), (0.783383, 0.262500, 0.483918), (0.787133, 0.266922, 0.480307), (0.790855, 0.271345, 0.476706), (0.794549, 0.275770, 0.473117), (0.798216, 0.280197, 0.469538), (0.801855, 0.284626, 0.465971), (0.805467, 0.289057, 0.462415), (0.809052, 0.293491, 0.458870), (0.812612, 0.297928, 0.455338), (0.816144, 0.302368, 0.451816), (0.819651, 0.306812, 0.448306), (0.823132, 0.311261, 0.444806), (0.826588, 0.315714, 0.441316), (0.830018, 0.320172, 0.437836), (0.833422, 0.324635, 0.434366), (0.836801, 0.329105, 0.430905), (0.840155, 0.333580, 0.427455), (0.843484, 0.338062, 0.424013), (0.846788, 0.342551, 0.420579), (0.850066, 0.347048, 0.417153), (0.853319, 0.351553, 0.413734), (0.856547, 0.356066, 0.410322), (0.859750, 0.360588, 0.406917), (0.862927, 0.365119, 0.403519), (0.866078, 0.369660, 0.400126), (0.869203, 0.374212, 0.396738), (0.872303, 0.378774, 0.393355), (0.875376, 0.383347, 0.389976), (0.878423, 0.387932, 0.386600), (0.881443, 0.392529, 0.383229), (0.884436, 0.397139, 0.379860), (0.887402, 0.401762, 0.376494), (0.890340, 0.406398, 0.373130), (0.893250, 0.411048, 0.369768), (0.896131, 0.415712, 0.366407), (0.898984, 0.420392, 0.363047), (0.901807, 0.425087, 0.359688), (0.904601, 0.429797, 0.356329), (0.907365, 0.434524, 0.352970), (0.910098, 0.439268, 0.349610), (0.912800, 0.444029, 0.346251), (0.915471, 0.448807, 0.342890), (0.918109, 0.453603, 0.339529), (0.920714, 0.458417, 0.336166), (0.923287, 0.463251, 0.332801), (0.925825, 0.468103, 0.329435), (0.928329, 0.472975, 0.326067), (0.930798, 0.477867, 0.322697), (0.933232, 0.482780, 0.319325), (0.935630, 0.487712, 0.315952), (0.937990, 0.492667, 0.312575), (0.940313, 0.497642, 0.309197), (0.942598, 0.502639, 0.305816), (0.944844, 0.507658, 0.302433), (0.947051, 0.512699, 0.299049), (0.949217, 0.517763, 0.295662), (0.951344, 0.522850, 0.292275), (0.953428, 0.527960, 0.288883), (0.955470, 0.533093, 0.285490), (0.957469, 0.538250, 0.282096), (0.959424, 0.543431, 0.278701), (0.961336, 0.548636, 0.275305), (0.963203, 0.553865, 0.271909), (0.965024, 0.559118, 0.268513), (0.966798, 0.564396, 0.265118), (0.968526, 0.569700, 0.261721), (0.970205, 0.575028, 0.258325), (0.971835, 0.580382, 0.254931), (0.973416, 0.585761, 0.251540), (0.974947, 0.591165, 0.248151), (0.976428, 0.596595, 0.244767), (0.977856, 0.602051, 0.241387), (0.979233, 0.607532, 0.238013), (0.980556, 0.613039, 0.234646), (0.981826, 0.618572, 0.231287), (0.983041, 0.624131, 0.227937), (0.984199, 0.629718, 0.224595), (0.985301, 0.635330, 0.221265), (0.986345, 0.640969, 0.217948), (0.987332, 0.646633, 0.214648), (0.988260, 0.652325, 0.211364), (0.989128, 0.658043, 0.208100), (0.989935, 0.663787, 0.204859), (0.990681, 0.669558, 0.201642), (0.991365, 0.675355, 0.198453), (0.991985, 0.681179, 0.195295), (0.992541, 0.687030, 0.192170), (0.993032, 0.692907, 
0.189084), (0.993456, 0.698810, 0.186041), (0.993814, 0.704741, 0.183043), (0.994103, 0.710698, 0.180097), (0.994324, 0.716681, 0.177208), (0.994474, 0.722691, 0.174381), (0.994553, 0.728728, 0.171622), (0.994561, 0.734791, 0.168938), (0.994495, 0.740880, 0.166335), (0.994355, 0.746995, 0.163821), (0.994141, 0.753137, 0.161404), (0.993851, 0.759304, 0.159092), (0.993482, 0.765499, 0.156891), (0.993033, 0.771720, 0.154808), (0.992505, 0.777967, 0.152855), (0.991897, 0.784239, 0.151042), (0.991209, 0.790537, 0.149377), (0.990439, 0.796859, 0.147870), (0.989587, 0.803205, 0.146529), (0.988648, 0.809579, 0.145357), (0.987621, 0.815978, 0.144363), (0.986509, 0.822401, 0.143557), (0.985314, 0.828846, 0.142945), (0.984031, 0.835315, 0.142528), (0.982653, 0.841812, 0.142303), (0.981190, 0.848329, 0.142279), (0.979644, 0.854866, 0.142453), (0.977995, 0.861432, 0.142808), (0.976265, 0.868016, 0.143351), (0.974443, 0.874622, 0.144061), (0.972530, 0.881250, 0.144923), (0.970533, 0.887896, 0.145919), (0.968443, 0.894564, 0.147014), (0.966271, 0.901249, 0.148180), (0.964021, 0.907950, 0.149370), (0.961681, 0.914672, 0.150520), (0.959276, 0.921407, 0.151566), (0.956808, 0.928152, 0.152409), (0.954287, 0.934908, 0.152921), (0.951726, 0.941671, 0.152925), (0.949151, 0.948435, 0.152178), (0.946602, 0.955190, 0.150328), (0.944152, 0.961916, 0.146861), (0.941896, 0.968590, 0.140956), (0.940015, 0.975158, 0.131326))) viridis = _convert( ((0.267004, 0.004874, 0.329415), (0.268510, 0.009605, 0.335427), (0.269944, 0.014625, 0.341379), (0.271305, 0.019942, 0.347269), (0.272594, 0.025563, 0.353093), (0.273809, 0.031497, 0.358853), (0.274952, 0.037752, 0.364543), (0.276022, 0.044167, 0.370164), (0.277018, 0.050344, 0.375715), (0.277941, 0.056324, 0.381191), (0.278791, 0.062145, 0.386592), (0.279566, 0.067836, 0.391917), (0.280267, 0.073417, 0.397163), (0.280894, 0.078907, 0.402329), (0.281446, 0.084320, 0.407414), (0.281924, 0.089666, 0.412415), (0.282327, 0.094955, 0.417331), (0.282656, 0.100196, 0.422160), (0.282910, 0.105393, 0.426902), (0.283091, 0.110553, 0.431554), (0.283197, 0.115680, 0.436115), (0.283229, 0.120777, 0.440584), (0.283187, 0.125848, 0.444960), (0.283072, 0.130895, 0.449241), (0.282884, 0.135920, 0.453427), (0.282623, 0.140926, 0.457517), (0.282290, 0.145912, 0.461510), (0.281887, 0.150881, 0.465405), (0.281412, 0.155834, 0.469201), (0.280868, 0.160771, 0.472899), (0.280255, 0.165693, 0.476498), (0.279574, 0.170599, 0.479997), (0.278826, 0.175490, 0.483397), (0.278012, 0.180367, 0.486697), (0.277134, 0.185228, 0.489898), (0.276194, 0.190074, 0.493001), (0.275191, 0.194905, 0.496005), (0.274128, 0.199721, 0.498911), (0.273006, 0.204520, 0.501721), (0.271828, 0.209303, 0.504434), (0.270595, 0.214069, 0.507052), (0.269308, 0.218818, 0.509577), (0.267968, 0.223549, 0.512008), (0.266580, 0.228262, 0.514349), (0.265145, 0.232956, 0.516599), (0.263663, 0.237631, 0.518762), (0.262138, 0.242286, 0.520837), (0.260571, 0.246922, 0.522828), (0.258965, 0.251537, 0.524736), (0.257322, 0.256130, 0.526563), (0.255645, 0.260703, 0.528312), (0.253935, 0.265254, 0.529983), (0.252194, 0.269783, 0.531579), (0.250425, 0.274290, 0.533103), (0.248629, 0.278775, 0.534556), (0.246811, 0.283237, 0.535941), (0.244972, 0.287675, 0.537260), (0.243113, 0.292092, 0.538516), (0.241237, 0.296485, 0.539709), (0.239346, 0.300855, 0.540844), (0.237441, 0.305202, 0.541921), (0.235526, 0.309527, 0.542944), (0.233603, 0.313828, 0.543914), (0.231674, 0.318106, 0.544834), (0.229739, 0.322361, 0.545706), (0.227802, 0.326594, 0.546532), 
(0.225863, 0.330805, 0.547314), (0.223925, 0.334994, 0.548053), (0.221989, 0.339161, 0.548752), (0.220057, 0.343307, 0.549413), (0.218130, 0.347432, 0.550038), (0.216210, 0.351535, 0.550627), (0.214298, 0.355619, 0.551184), (0.212395, 0.359683, 0.551710), (0.210503, 0.363727, 0.552206), (0.208623, 0.367752, 0.552675), (0.206756, 0.371758, 0.553117), (0.204903, 0.375746, 0.553533), (0.203063, 0.379716, 0.553925), (0.201239, 0.383670, 0.554294), (0.199430, 0.387607, 0.554642), (0.197636, 0.391528, 0.554969), (0.195860, 0.395433, 0.555276), (0.194100, 0.399323, 0.555565), (0.192357, 0.403199, 0.555836), (0.190631, 0.407061, 0.556089), (0.188923, 0.410910, 0.556326), (0.187231, 0.414746, 0.556547), (0.185556, 0.418570, 0.556753), (0.183898, 0.422383, 0.556944), (0.182256, 0.426184, 0.557120), (0.180629, 0.429975, 0.557282), (0.179019, 0.433756, 0.557430), (0.177423, 0.437527, 0.557565), (0.175841, 0.441290, 0.557685), (0.174274, 0.445044, 0.557792), (0.172719, 0.448791, 0.557885), (0.171176, 0.452530, 0.557965), (0.169646, 0.456262, 0.558030), (0.168126, 0.459988, 0.558082), (0.166617, 0.463708, 0.558119), (0.165117, 0.467423, 0.558141), (0.163625, 0.471133, 0.558148), (0.162142, 0.474838, 0.558140), (0.160665, 0.478540, 0.558115), (0.159194, 0.482237, 0.558073), (0.157729, 0.485932, 0.558013), (0.156270, 0.489624, 0.557936), (0.154815, 0.493313, 0.557840), (0.153364, 0.497000, 0.557724), (0.151918, 0.500685, 0.557587), (0.150476, 0.504369, 0.557430), (0.149039, 0.508051, 0.557250), (0.147607, 0.511733, 0.557049), (0.146180, 0.515413, 0.556823), (0.144759, 0.519093, 0.556572), (0.143343, 0.522773, 0.556295), (0.141935, 0.526453, 0.555991), (0.140536, 0.530132, 0.555659), (0.139147, 0.533812, 0.555298), (0.137770, 0.537492, 0.554906), (0.136408, 0.541173, 0.554483), (0.135066, 0.544853, 0.554029), (0.133743, 0.548535, 0.553541), (0.132444, 0.552216, 0.553018), (0.131172, 0.555899, 0.552459), (0.129933, 0.559582, 0.551864), (0.128729, 0.563265, 0.551229), (0.127568, 0.566949, 0.550556), (0.126453, 0.570633, 0.549841), (0.125394, 0.574318, 0.549086), (0.124395, 0.578002, 0.548287), (0.123463, 0.581687, 0.547445), (0.122606, 0.585371, 0.546557), (0.121831, 0.589055, 0.545623), (0.121148, 0.592739, 0.544641), (0.120565, 0.596422, 0.543611), (0.120092, 0.600104, 0.542530), (0.119738, 0.603785, 0.541400), (0.119512, 0.607464, 0.540218), (0.119423, 0.611141, 0.538982), (0.119483, 0.614817, 0.537692), (0.119699, 0.618490, 0.536347), (0.120081, 0.622161, 0.534946), (0.120638, 0.625828, 0.533488), (0.121380, 0.629492, 0.531973), (0.122312, 0.633153, 0.530398), (0.123444, 0.636809, 0.528763), (0.124780, 0.640461, 0.527068), (0.126326, 0.644107, 0.525311), (0.128087, 0.647749, 0.523491), (0.130067, 0.651384, 0.521608), (0.132268, 0.655014, 0.519661), (0.134692, 0.658636, 0.517649), (0.137339, 0.662252, 0.515571), (0.140210, 0.665859, 0.513427), (0.143303, 0.669459, 0.511215), (0.146616, 0.673050, 0.508936), (0.150148, 0.676631, 0.506589), (0.153894, 0.680203, 0.504172), (0.157851, 0.683765, 0.501686), (0.162016, 0.687316, 0.499129), (0.166383, 0.690856, 0.496502), (0.170948, 0.694384, 0.493803), (0.175707, 0.697900, 0.491033), (0.180653, 0.701402, 0.488189), (0.185783, 0.704891, 0.485273), (0.191090, 0.708366, 0.482284), (0.196571, 0.711827, 0.479221), (0.202219, 0.715272, 0.476084), (0.208030, 0.718701, 0.472873), (0.214000, 0.722114, 0.469588), (0.220124, 0.725509, 0.466226), (0.226397, 0.728888, 0.462789), (0.232815, 0.732247, 0.459277), (0.239374, 0.735588, 0.455688), (0.246070, 0.738910, 0.452024), 
(0.252899, 0.742211, 0.448284), (0.259857, 0.745492, 0.444467), (0.266941, 0.748751, 0.440573), (0.274149, 0.751988, 0.436601), (0.281477, 0.755203, 0.432552), (0.288921, 0.758394, 0.428426), (0.296479, 0.761561, 0.424223), (0.304148, 0.764704, 0.419943), (0.311925, 0.767822, 0.415586), (0.319809, 0.770914, 0.411152), (0.327796, 0.773980, 0.406640), (0.335885, 0.777018, 0.402049), (0.344074, 0.780029, 0.397381), (0.352360, 0.783011, 0.392636), (0.360741, 0.785964, 0.387814), (0.369214, 0.788888, 0.382914), (0.377779, 0.791781, 0.377939), (0.386433, 0.794644, 0.372886), (0.395174, 0.797475, 0.367757), (0.404001, 0.800275, 0.362552), (0.412913, 0.803041, 0.357269), (0.421908, 0.805774, 0.351910), (0.430983, 0.808473, 0.346476), (0.440137, 0.811138, 0.340967), (0.449368, 0.813768, 0.335384), (0.458674, 0.816363, 0.329727), (0.468053, 0.818921, 0.323998), (0.477504, 0.821444, 0.318195), (0.487026, 0.823929, 0.312321), (0.496615, 0.826376, 0.306377), (0.506271, 0.828786, 0.300362), (0.515992, 0.831158, 0.294279), (0.525776, 0.833491, 0.288127), (0.535621, 0.835785, 0.281908), (0.545524, 0.838039, 0.275626), (0.555484, 0.840254, 0.269281), (0.565498, 0.842430, 0.262877), (0.575563, 0.844566, 0.256415), (0.585678, 0.846661, 0.249897), (0.595839, 0.848717, 0.243329), (0.606045, 0.850733, 0.236712), (0.616293, 0.852709, 0.230052), (0.626579, 0.854645, 0.223353), (0.636902, 0.856542, 0.216620), (0.647257, 0.858400, 0.209861), (0.657642, 0.860219, 0.203082), (0.668054, 0.861999, 0.196293), (0.678489, 0.863742, 0.189503), (0.688944, 0.865448, 0.182725), (0.699415, 0.867117, 0.175971), (0.709898, 0.868751, 0.169257), (0.720391, 0.870350, 0.162603), (0.730889, 0.871916, 0.156029), (0.741388, 0.873449, 0.149561), (0.751884, 0.874951, 0.143228), (0.762373, 0.876424, 0.137064), (0.772852, 0.877868, 0.131109), (0.783315, 0.879285, 0.125405), (0.793760, 0.880678, 0.120005), (0.804182, 0.882046, 0.114965), (0.814576, 0.883393, 0.110347), (0.824940, 0.884720, 0.106217), (0.835270, 0.886029, 0.102646), (0.845561, 0.887322, 0.099702), (0.855810, 0.888601, 0.097452), (0.866013, 0.889868, 0.095953), (0.876168, 0.891125, 0.095250), (0.886271, 0.892374, 0.095374), (0.896320, 0.893616, 0.096335), (0.906311, 0.894855, 0.098125), (0.916242, 0.896091, 0.100717), (0.926106, 0.897330, 0.104071), (0.935904, 0.898570, 0.108131), (0.945636, 0.899815, 0.112838), (0.955300, 0.901065, 0.118128), (0.964894, 0.902323, 0.123941), (0.974417, 0.903590, 0.130215), (0.983868, 0.904867, 0.136897), (0.993248, 0.906157, 0.143936))) __all__ = ['magma', 'inferno', 'plasma', 'viridis']
apache-2.0
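The colormaps module above stores each palette as a 256x3 uint8 lookup table. A small sketch of how such a table can be applied to a 2-D array of scalars by normalizing to indices 0..255 and indexing into the table; the apply_colormap helper and the synthetic grayscale table are assumptions for illustration, not part of the module:

import numpy as np

def apply_colormap(values, colormap):
    """Map a float array to RGB using a (256, 3) uint8 lookup table."""
    vmin, vmax = float(values.min()), float(values.max())
    scale = (vmax - vmin) or 1.0                 # avoid division by zero on constant input
    indices = ((values - vmin) / scale * 255).astype(np.uint8)
    return colormap[indices]                     # shape (..., 3), dtype uint8

# Synthetic data and a grayscale table standing in for magma/inferno/plasma/viridis.
gray = np.stack([np.arange(256, dtype=np.uint8)] * 3, axis=1)
image = apply_colormap(np.random.rand(4, 5), gray)
print(image.shape, image.dtype)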
ninotoshi/tensorflow
tensorflow/examples/skflow/hdf5_classification.py
9
1779
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from sklearn import metrics, cross_validation from tensorflow.contrib import learn import h5py # Load dataset. iris = learn.datasets.load_dataset('iris') X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.2, random_state=42) # Note that we are saving and loading the iris data in h5 format as a simple demonstration here. h5f = h5py.File('test_hdf5.h5', 'w') h5f.create_dataset('X_train', data=X_train) h5f.create_dataset('X_test', data=X_test) h5f.create_dataset('y_train', data=y_train) h5f.create_dataset('y_test', data=y_test) h5f.close() h5f = h5py.File('test_hdf5.h5', 'r') X_train = h5f['X_train'] X_test = h5f['X_test'] y_train = h5f['y_train'] y_test = h5f['y_test'] # Build 3 layer DNN with 10, 20, 10 units respectively. classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3, steps=200) # Fit and predict. classifier.fit(X_train, y_train) score = metrics.accuracy_score(y_test, classifier.predict(X_test)) print('Accuracy: {0:f}'.format(score))
apache-2.0
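The example above round-trips the training split through an HDF5 file before fitting. The h5py portion of that pattern, isolated from the long-deprecated TensorFlowDNNClassifier API, can be sketched as follows; the array contents and file name are arbitrary:

import numpy as np
import h5py

X = np.random.rand(20, 4)
y = np.random.randint(0, 3, size=20)

# Write the arrays to an HDF5 file...
with h5py.File("example_data.h5", "w") as f:
    f.create_dataset("X", data=X)
    f.create_dataset("y", data=y)

# ...and read them back; slicing with [:] materializes the datasets as ndarrays.
with h5py.File("example_data.h5", "r") as f:
    X_back = f["X"][:]
    y_back = f["y"][:]

assert np.allclose(X, X_back) and np.array_equal(y, y_back)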
dsm054/pandas
pandas/tests/series/test_sorting.py
2
9605
# coding=utf-8 import random import numpy as np import pytest from pandas import Categorical, DataFrame, IntervalIndex, MultiIndex, Series import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_series_equal from .common import TestData class TestSeriesSorting(TestData): def test_sort_values(self): # check indexes are reordered corresponding with the values ser = Series([3, 2, 4, 1], ['A', 'B', 'C', 'D']) expected = Series([1, 2, 3, 4], ['D', 'B', 'A', 'C']) result = ser.sort_values() tm.assert_series_equal(expected, result) ts = self.ts.copy() ts[:5] = np.NaN vals = ts.values result = ts.sort_values() assert np.isnan(result[-5:]).all() tm.assert_numpy_array_equal(result[:-5].values, np.sort(vals[5:])) # na_position result = ts.sort_values(na_position='first') assert np.isnan(result[:5]).all() tm.assert_numpy_array_equal(result[5:].values, np.sort(vals[5:])) # something object-type ser = Series(['A', 'B'], [1, 2]) # no failure ser.sort_values() # ascending=False ordered = ts.sort_values(ascending=False) expected = np.sort(ts.dropna().values)[::-1] assert_almost_equal(expected, ordered.dropna().values) ordered = ts.sort_values(ascending=False, na_position='first') assert_almost_equal(expected, ordered.dropna().values) # ascending=[False] should behave the same as ascending=False ordered = ts.sort_values(ascending=[False]) expected = ts.sort_values(ascending=False) assert_series_equal(expected, ordered) ordered = ts.sort_values(ascending=[False], na_position='first') expected = ts.sort_values(ascending=False, na_position='first') assert_series_equal(expected, ordered) pytest.raises(ValueError, lambda: ts.sort_values(ascending=None)) pytest.raises(ValueError, lambda: ts.sort_values(ascending=[])) pytest.raises(ValueError, lambda: ts.sort_values(ascending=[1, 2, 3])) pytest.raises(ValueError, lambda: ts.sort_values(ascending=[False, False])) pytest.raises(ValueError, lambda: ts.sort_values(ascending='foobar')) # inplace=True ts = self.ts.copy() ts.sort_values(ascending=False, inplace=True) tm.assert_series_equal(ts, self.ts.sort_values(ascending=False)) tm.assert_index_equal(ts.index, self.ts.sort_values(ascending=False).index) # GH 5856/5853 # Series.sort_values operating on a view df = DataFrame(np.random.randn(10, 4)) s = df.iloc[:, 0] def f(): s.sort_values(inplace=True) pytest.raises(ValueError, f) def test_sort_index(self): rindex = list(self.ts.index) random.shuffle(rindex) random_order = self.ts.reindex(rindex) sorted_series = random_order.sort_index() assert_series_equal(sorted_series, self.ts) # descending sorted_series = random_order.sort_index(ascending=False) assert_series_equal(sorted_series, self.ts.reindex(self.ts.index[::-1])) # compat on level sorted_series = random_order.sort_index(level=0) assert_series_equal(sorted_series, self.ts) # compat on axis sorted_series = random_order.sort_index(axis=0) assert_series_equal(sorted_series, self.ts) pytest.raises(ValueError, lambda: random_order.sort_values(axis=1)) sorted_series = random_order.sort_index(level=0, axis=0) assert_series_equal(sorted_series, self.ts) pytest.raises(ValueError, lambda: random_order.sort_index(level=0, axis=1)) def test_sort_index_inplace(self): # For #11402 rindex = list(self.ts.index) random.shuffle(rindex) # descending random_order = self.ts.reindex(rindex) result = random_order.sort_index(ascending=False, inplace=True) assert result is None tm.assert_series_equal(random_order, self.ts.reindex( self.ts.index[::-1])) # ascending random_order = self.ts.reindex(rindex) 
result = random_order.sort_index(ascending=True, inplace=True) assert result is None tm.assert_series_equal(random_order, self.ts) @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 def test_sort_index_multiindex(self, level): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True res = s.sort_index(level=level) assert_series_equal(backwards, res) # GH13496 # sort has no effect without remaining lvls res = s.sort_index(level=level, sort_remaining=False) assert_series_equal(s, res) def test_sort_index_kind(self): # GH #14444 & #13589: Add support for sort algo choosing series = Series(index=[3, 2, 1, 4, 3]) expected_series = Series(index=[1, 2, 3, 3, 4]) index_sorted_series = series.sort_index(kind='mergesort') assert_series_equal(expected_series, index_sorted_series) index_sorted_series = series.sort_index(kind='quicksort') assert_series_equal(expected_series, index_sorted_series) index_sorted_series = series.sort_index(kind='heapsort') assert_series_equal(expected_series, index_sorted_series) def test_sort_index_na_position(self): series = Series(index=[3, 2, 1, 4, 3, np.nan]) expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4]) index_sorted_series = series.sort_index(na_position='first') assert_series_equal(expected_series_first, index_sorted_series) expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan]) index_sorted_series = series.sort_index(na_position='last') assert_series_equal(expected_series_last, index_sorted_series) def test_sort_index_intervals(self): s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays( [0, 1, 2, 3], [1, 2, 3, 4])) result = s.sort_index() expected = s assert_series_equal(result, expected) result = s.sort_index(ascending=False) expected = Series([3, 2, 1, np.nan], IntervalIndex.from_arrays( [3, 2, 1, 0], [4, 3, 2, 1])) assert_series_equal(result, expected) def test_sort_values_categorical(self): c = Categorical(["a", "b", "b", "a"], ordered=False) cat = Series(c.copy()) # sort in the categories order expected = Series( Categorical(["a", "a", "b", "b"], ordered=False), index=[0, 3, 1, 2]) result = cat.sort_values() tm.assert_series_equal(result, expected) cat = Series(Categorical(["a", "c", "b", "d"], ordered=True)) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=np.object_) tm.assert_numpy_array_equal(res.__array__(), exp) cat = Series(Categorical(["a", "c", "b", "d"], categories=[ "a", "b", "c", "d"], ordered=True)) res = cat.sort_values() exp = np.array(["a", "b", "c", "d"], dtype=np.object_) tm.assert_numpy_array_equal(res.__array__(), exp) res = cat.sort_values(ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=np.object_) tm.assert_numpy_array_equal(res.__array__(), exp) raw_cat1 = Categorical(["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False) raw_cat2 = Categorical(["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True) s = ["a", "b", "c", "d"] df = DataFrame({"unsort": raw_cat1, "sort": raw_cat2, "string": s, "values": [1, 2, 3, 4]}) # Cats must be sorted in a dataframe res = df.sort_values(by=["string"], ascending=False) exp = np.array(["d", "c", "b", "a"], dtype=np.object_) tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp) assert res["sort"].dtype == "category" res = df.sort_values(by=["sort"], ascending=False) exp = df.sort_values(by=["string"], ascending=True) tm.assert_series_equal(res["values"], exp["values"]) assert res["sort"].dtype == "category" assert 
res["unsort"].dtype == "category" # unordered cat, but we allow this df.sort_values(by=["unsort"], ascending=False) # multi-columns sort # GH 7848 df = DataFrame({"id": [6, 5, 4, 3, 2, 1], "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']}) df["grade"] = Categorical(df["raw_grade"], ordered=True) df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a']) # sorts 'grade' according to the order of the categories result = df.sort_values(by=['grade']) expected = df.iloc[[1, 2, 5, 0, 3, 4]] tm.assert_frame_equal(result, expected) # multi result = df.sort_values(by=['grade', 'id']) expected = df.iloc[[2, 1, 5, 4, 3, 0]] tm.assert_frame_equal(result, expected)
bsd-3-clause
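The sorting tests above cover Series.sort_values and sort_index options such as na_position and ordered categoricals. A compact sketch of the behaviors they assert, with values chosen arbitrarily:

import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 2.0], index=list("abcd"))

# NaNs go last by default, but can be moved to the front.
print(s.sort_values().index.tolist())                      # ['c', 'd', 'a', 'b']
print(s.sort_values(na_position="first").index.tolist())   # ['b', 'c', 'd', 'a']

# Ordered categoricals sort by category order, not lexically.
cat = pd.Series(pd.Categorical(["a", "c", "b", "d"],
                               categories=["d", "c", "b", "a"], ordered=True))
print(cat.sort_values().tolist())                          # ['d', 'c', 'b', 'a']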
coder-james/mxnet
python/mxnet/notebook/callback.py
13
13200
# pylint: disable=fixme, invalid-name, missing-docstring, no-init, old-style-class, multiple-statements # pylint: disable=arguments-differ, too-many-arguments, no-member """Visualization callback function """ try: import datetime except ImportError: class Datetime_Failed_To_Import: pass datetime = Datetime_Failed_To_Import try: import bokeh.plotting except ImportError: pass try: from collections import defaultdict except ImportError: class Defaultdict_Failed_To_Import: pass defaultdict = Defaultdict_Failed_To_Import try: import pandas as pd except ImportError: class Pandas_Failed_To_Import: pass pd = Pandas_Failed_To_Import import time # pylint: enable=missing-docstring, no-init, old-style-class, multiple-statements def _add_new_columns(dataframe, metrics): """Add new metrics as new columns to selected pandas dataframe. Parameters ---------- dataframe : pandas.DataFrame Selected dataframe needs to be modified. metrics : metric.EvalMetric New metrics to be added. """ #TODO(leodirac): we don't really need to do this on every update. Optimize new_columns = set(metrics.keys()) - set(dataframe.columns) for col in new_columns: dataframe[col] = None def _extend(baseData, newData): """Assuming a is shorter than b, copy the end of b onto a """ baseData.extend(newData[len(baseData):]) class PandasLogger(object): """Logs statistics about training run into Pandas dataframes. Records three separate dataframes: train, eval, epoch. Parameters ---------- batch_size: int batch_size of data frequent: int How many training mini-batches between calculations. Defaults to calculating every 50 batches. (Eval data is stored once per epoch over the entire eval data set.) """ def __init__(self, batch_size, frequent=50): self.batch_size = batch_size self.frequent = frequent self._dataframes = { 'train': pd.DataFrame(), 'eval': pd.DataFrame(), 'epoch': pd.DataFrame(), } self.last_time = time.time() self.start_time = datetime.datetime.now() self.last_epoch_time = datetime.datetime.now() @property def train_df(self): """The dataframe with training data. This has metrics for training minibatches, logged every "frequent" batches. (frequent is a constructor param) """ return self._dataframes['train'] @property def eval_df(self): """The dataframe with evaluation data. This has validation scores calculated at the end of each epoch. """ return self._dataframes['eval'] @property def epoch_df(self): """The dataframe with epoch data. This has timing information. """ return self._dataframes['epoch'] @property def all_dataframes(self): """Return a dict of dataframes """ return self._dataframes def elapsed(self): """Calcaulate the elapsed time from training starting. """ return datetime.datetime.now() - self.start_time def append_metrics(self, metrics, df_name): """Append new metrics to selected dataframes. Parameters ---------- metrics : metric.EvalMetric New metrics to be added. df_name : str Name of the dataframe to be modified. """ dataframe = self._dataframes[df_name] _add_new_columns(dataframe, metrics) dataframe.loc[len(dataframe)] = metrics def train_cb(self, param): """Callback funtion for training. """ if param.nbatch % self.frequent == 0: self._process_batch(param, 'train') def eval_cb(self, param): """Callback function for evaluation """ self._process_batch(param, 'eval') def _process_batch(self, param, dataframe): """Update parameters for selected dataframe after a completed batch Parameters ---------- dataframe : pandas.DataFrame Selected dataframe needs to be modified. 
""" now = time.time() if param.eval_metric is not None: metrics = dict(param.eval_metric.get_name_value()) param.eval_metric.reset() else: metrics = {} speed = self.frequent / (now - self.last_time) metrics['batches_per_sec'] = speed * self.batch_size metrics['records_per_sec'] = speed metrics['elapsed'] = self.elapsed() metrics['minibatch_count'] = param.nbatch metrics['epoch'] = param.epoch self.append_metrics(metrics, dataframe) self.last_time = now def epoch_cb(self): """Callback function after each epoch. Now it records each epoch time and append it to epoch dataframe. """ metrics = {} metrics['elapsed'] = self.elapsed() now = datetime.datetime.now() metrics['epoch_time'] = now - self.last_epoch_time self.append_metrics(metrics, 'epoch') self.last_epoch_time = now def callback_args(self): """returns **kwargs parameters for model.fit() to enable all callbacks. e.g. model.fit(X=train, eval_data=test, **pdlogger.callback_args()) """ return { 'batch_end_callback': self.train_cb, 'eval_end_callback': self.eval_cb, 'epoch_end_callback': self.epoch_cb, } class LiveBokehChart(object): """Callback object that renders a bokeh chart in a jupyter notebook that gets updated as the training run proceeds. Requires a PandasLogger to collect the data it will render. This is an abstract base-class. Sub-classes define the specific chart. """ def __init__(self, pandas_logger, metric_name, display_freq=10, batch_size=None, frequent=50): if pandas_logger: self.pandas_logger = pandas_logger else: self.pandas_logger = PandasLogger(batch_size=batch_size, frequent=frequent) self.display_freq = display_freq self.last_update = time.time() #NOTE: would be nice to auto-detect the metric_name if there's only one. self.metric_name = metric_name bokeh.io.output_notebook() self.handle = self.setup_chart() def setup_chart(self): """Render a bokeh object and return a handle to it. """ raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed") def update_chart_data(self): """Update the bokeh object with new data. """ raise NotImplementedError("Incomplete base class: LiveBokehChart must be sub-classed") def interval_elapsed(self): """Check whether it is time to update plot. Returns ------- Boolean value of whethe to update now """ return time.time() - self.last_update > self.display_freq def _push_render(self): """Render the plot with bokeh.io and push to notebook. """ bokeh.io.push_notebook(handle=self.handle) self.last_update = time.time() def _do_update(self): """Update the plot chart data and render the updates. """ self.update_chart_data() self._push_render() def batch_cb(self, param): """Callback function after a completed batch. """ if self.interval_elapsed(): self._do_update() def eval_cb(self, param): """Callback function after an evaluation. """ # After eval results, force an update. self._do_update() def callback_args(self): """returns **kwargs parameters for model.fit() to enable all callbacks. e.g. model.fit(X=train, eval_data=test, **pdlogger.callback_args()) """ return { 'batch_end_callback': self.batch_cb, 'eval_end_callback': self.eval_cb, } class LiveTimeSeries(LiveBokehChart): """Plot the elasped time during live learning. 
""" def __init__(self, **fig_params): self.fig = bokeh.plotting.Figure(x_axis_type='datetime', x_axis_label='Elapsed time', **fig_params) super(LiveTimeSeries, self).__init__(None, None) # TODO: clean up this class hierarchy def setup_chart(self): self.start_time = datetime.datetime.now() self.x_axis_val = [] self.y_axis_val = [] self.fig.line(self.x_axis_val, self.y_axis_val) return bokeh.plotting.show(self.fig, notebook_handle=True) def elapsed(self): """Calculate elasped time from starting """ return datetime.datetime.now() - self.start_time def update_chart_data(self, value): self.x_axis_val.append(self.elapsed()) self.y_axis_val.append(value) self._push_render() class LiveLearningCurve(LiveBokehChart): """Draws a learning curve with training & validation metrics over time as the network trains. """ def __init__(self, metric_name, display_freq=10, frequent=50): self.frequent = frequent self.start_time = datetime.datetime.now() self._data = { 'train': {'elapsed': [],}, 'eval': {'elapsed': [],}, } super(LiveLearningCurve, self).__init__(None, metric_name, display_freq, frequent) def setup_chart(self): self.fig = bokeh.plotting.Figure(x_axis_type='datetime', x_axis_label='Training time') #TODO(leodirac): There's got to be a better way to # get a bokeh plot to dynamically update as a pandas dataframe changes, # instead of copying into a list. # I can't figure it out though. Ask a pyData expert. self.x_axis_val1 = [] self.y_axis_val1 = [] self.train1 = self.fig.line(self.x_axis_val1, self.y_axis_val1, line_dash='dotted', alpha=0.3, legend="train") self.train2 = self.fig.circle(self.x_axis_val1, self.y_axis_val1, size=1.5, line_alpha=0.3, fill_alpha=0.3, legend="train") self.train2.visible = False # Turn this on later. self.x_axis_val2 = [] self.y_axis_val2 = [] self.valid1 = self.fig.line(self.x_axis_val2, self.y_axis_val2, line_color='green', line_width=2, legend="validation") self.valid2 = self.fig.circle(self.x_axis_val2, self.y_axis_val2, line_color='green', line_width=2, legend=None) self.fig.legend.location = "bottom_right" self.fig.yaxis.axis_label = self.metric_name return bokeh.plotting.show(self.fig, notebook_handle=True) def _do_update(self): self.update_chart_data() self._push_render() def batch_cb(self, param): if param.nbatch % self.frequent == 0: self._process_batch(param, 'train') if self.interval_elapsed(): self._do_update() def eval_cb(self, param): # After eval results, force an update. self._process_batch(param, 'eval') self._do_update() def _process_batch(self, param, df_name): """Update selected dataframe after a completed batch Parameters ---------- df_name : str Selected dataframe name needs to be modified. 
""" if param.eval_metric is not None: metrics = dict(param.eval_metric.get_name_value()) param.eval_metric.reset() else: metrics = {} metrics['elapsed'] = datetime.datetime.now() - self.start_time for key, value in metrics.items(): if not self._data[df_name].has_key(key): self._data[df_name][key] = [] self._data[df_name][key].append(value) def update_chart_data(self): dataframe = self._data['train'] if len(dataframe['elapsed']): _extend(self.x_axis_val1, dataframe['elapsed']) _extend(self.y_axis_val1, dataframe[self.metric_name]) dataframe = self._data['eval'] if len(dataframe['elapsed']): _extend(self.x_axis_val2, dataframe['elapsed']) _extend(self.y_axis_val2, dataframe[self.metric_name]) if len(dataframe) > 10: self.train1.visible = False self.train2.visible = True def args_wrapper(*args): """Generates callback arguments for model.fit() for a set of callback objects. Callback objects like PandasLogger(), LiveLearningCurve() get passed in. This assembles all their callback arguments. """ out = defaultdict(list) for callback in args: callback_args = callback.callback_args() for k, v in callback_args.items(): out[k].append(v) return dict(out)
apache-2.0
Javiercerna/MissionPlanner
Lib/site-packages/scipy/signal/ltisys.py
53
23848
""" ltisys -- a collection of classes and functions for modeling linear time invariant systems. """ # # Author: Travis Oliphant 2001 # # Feb 2010: Warren Weckesser # Rewrote lsim2 and added impulse2. # from filter_design import tf2zpk, zpk2tf, normalize import numpy from numpy import product, zeros, array, dot, transpose, ones, \ nan_to_num, zeros_like, linspace #import scipy.interpolate as interpolate import scipy.integrate as integrate import scipy.linalg as linalg from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \ squeeze, diag, asarray def tf2ss(num, den): """Transfer function to state-space representation. Parameters ---------- num, den : array_like Sequences representing the numerator and denominator polynomials. Returns ------- A, B, C, D : ndarray State space representation of the system. """ # Controller canonical state-space representation. # if M+1 = len(num) and K+1 = len(den) then we must have M <= K # states are found by asserting that X(s) = U(s) / D(s) # then Y(s) = N(s) * X(s) # # A, B, C, and D follow quite naturally. # num, den = normalize(num, den) # Strips zeros, checks arrays nn = len(num.shape) if nn == 1: num = asarray([num], num.dtype) M = num.shape[1] K = len(den) if (M > K): raise ValueError("Improper transfer function.") if (M == 0 or K == 0): # Null system return array([],float), array([], float), array([], float), \ array([], float) # pad numerator to have same number of columns has denominator num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num] if num.shape[-1] > 0: D = num[:,0] else: D = array([],float) if K == 1: return array([], float), array([], float), array([], float), D frow = -array([den[1:]]) A = r_[frow, eye(K-2, K-1)] B = eye(K-1, 1) C = num[:,1:] - num[:,0] * den[1:] return A, B, C, D def _none_to_empty(arg): if arg is None: return [] else: return arg def abcd_normalize(A=None, B=None, C=None, D=None): """Check state-space matrices and ensure they are rank-2. """ A, B, C, D = map(_none_to_empty, (A, B, C, D)) A, B, C, D = map(atleast_2d, (A, B, C, D)) if ((len(A.shape) > 2) or (len(B.shape) > 2) or \ (len(C.shape) > 2) or (len(D.shape) > 2)): raise ValueError("A, B, C, D arrays can be no larger than rank-2.") MA, NA = A.shape MB, NB = B.shape MC, NC = C.shape MD, ND = D.shape if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0): MC, NC = MD, NA C = zeros((MC, NC)) if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0): MB, NB = MA, ND B = zeros(MB, NB) if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0): MD, ND = MC, NB D = zeros(MD, ND) if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0): MA, NA = MB, NC A = zeros(MA, NA) if MA != NA: raise ValueError("A must be square.") if MA != MB: raise ValueError("A and B must have the same number of rows.") if NA != NC: raise ValueError("A and C must have the same number of columns.") if MD != MC: raise ValueError("C and D must have the same number of rows.") if ND != NB: raise ValueError("B and D must have the same number of columns.") return A, B, C, D def ss2tf(A, B, C, D, input=0): """State-space to transfer function. Parameters ---------- A, B, C, D : ndarray State-space representation of linear system. input : int, optional For multiple-input systems, the input to use. Returns ------- num, den : 1D ndarray Numerator and denominator polynomials (as sequences) respectively. 
""" # transfer function is C (sI - A)**(-1) B + D A, B, C, D = map(asarray, (A, B, C, D)) # Check consistency and # make them all rank-2 arrays A, B, C, D = abcd_normalize(A, B, C, D) nout, nin = D.shape if input >= nin: raise ValueError("System does not have the input specified.") # make MOSI from possibly MOMI system. if B.shape[-1] != 0: B = B[:,input] B.shape = (B.shape[0],1) if D.shape[-1] != 0: D = D[:,input] try: den = poly(A) except ValueError: den = 1 if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0): num = numpy.ravel(D) if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0): den = [] return num, den num_states = A.shape[0] type_test = A[:,0] + B[:,0] + C[0,:] + D num = numpy.zeros((nout, num_states+1), type_test.dtype) for k in range(nout): Ck = atleast_2d(C[k,:]) num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den return num, den def zpk2ss(z, p, k): """Zero-pole-gain representation to state-space representation Parameters ---------- z, p : sequence Zeros and poles. k : float System gain. Returns ------- A, B, C, D : ndarray State-space matrices. """ return tf2ss(*zpk2tf(z,p,k)) def ss2zpk(A, B, C, D, input=0): """State-space representation to zero-pole-gain representation. Parameters ---------- A, B, C, D : ndarray State-space representation of linear system. input : int, optional For multiple-input systems, the input to use. Returns ------- z, p : sequence Zeros and poles. k : float System gain. """ return tf2zpk(*ss2tf(A,B,C,D,input=input)) class lti(object): """Linear Time Invariant class which simplifies representation. """ def __init__(self,*args,**kwords): """Initialize the LTI system using either: (numerator, denominator) (zeros, poles, gain) (A, B, C, D) -- state-space. """ N = len(args) if N == 2: # Numerator denominator transfer function input self.__dict__['num'], self.__dict__['den'] = normalize(*args) self.__dict__['zeros'], self.__dict__['poles'], \ self.__dict__['gain'] = tf2zpk(*args) self.__dict__['A'], self.__dict__['B'], \ self.__dict__['C'], \ self.__dict__['D'] = tf2ss(*args) self.inputs = 1 if len(self.num.shape) > 1: self.outputs = self.num.shape[0] else: self.outputs = 1 elif N == 3: # Zero-pole-gain form self.__dict__['zeros'], self.__dict__['poles'], \ self.__dict__['gain'] = args self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args) self.__dict__['A'], self.__dict__['B'], \ self.__dict__['C'], \ self.__dict__['D'] = zpk2ss(*args) self.inputs = 1 if len(self.zeros.shape) > 1: self.outputs = self.zeros.shape[0] else: self.outputs = 1 elif N == 4: # State-space form self.__dict__['A'], self.__dict__['B'], \ self.__dict__['C'], \ self.__dict__['D'] = abcd_normalize(*args) self.__dict__['zeros'], self.__dict__['poles'], \ self.__dict__['gain'] = ss2zpk(*args) self.__dict__['num'], self.__dict__['den'] = ss2tf(*args) self.inputs = self.B.shape[-1] self.outputs = self.C.shape[0] else: raise ValueError("Needs 2, 3, or 4 arguments.") def __setattr__(self, attr, val): if attr in ['num','den']: self.__dict__[attr] = val self.__dict__['zeros'], self.__dict__['poles'], \ self.__dict__['gain'] = \ tf2zpk(self.num, self.den) self.__dict__['A'], self.__dict__['B'], \ self.__dict__['C'], \ self.__dict__['D'] = \ tf2ss(self.num, self.den) elif attr in ['zeros', 'poles', 'gain']: self.__dict__[attr] = val self.__dict__['num'], self.__dict__['den'] = \ zpk2tf(self.zeros, self.poles, self.gain) self.__dict__['A'], self.__dict__['B'], \ self.__dict__['C'], \ self.__dict__['D'] = \ zpk2ss(self.zeros, self.poles, self.gain) elif attr in ['A', 
'B', 'C', 'D']: self.__dict__[attr] = val self.__dict__['zeros'], self.__dict__['poles'], \ self.__dict__['gain'] = \ ss2zpk(self.A, self.B, self.C, self.D) self.__dict__['num'], self.__dict__['den'] = \ ss2tf(self.A, self.B, self.C, self.D) else: self.__dict__[attr] = val def impulse(self, X0=None, T=None, N=None): return impulse(self, X0=X0, T=T, N=N) def step(self, X0=None, T=None, N=None): return step(self, X0=X0, T=T, N=N) def output(self, U, T, X0=None): return lsim(self, U, T, X0=X0) def lsim2(system, U=None, T=None, X0=None, **kwargs): """ Simulate output of a continuous-time linear system, by using the ODE solver `scipy.integrate.odeint`. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like (1D or 2D), optional An input array describing the input at each time T. Linear interpolation is used between given times. If there are multiple inputs, then each column of the rank-2 array represents an input. If U is not given, the input is assumed to be zero. T : array_like (1D or 2D), optional The time steps at which the input is defined and at which the output is desired. The default is 101 evenly spaced points on the interval [0,10.0]. X0 : array_like (1D), optional The initial condition of the state vector. If `X0` is not given, the initial conditions are assumed to be 0. kwargs : dict Additional keyword arguments are passed on to the function odeint. See the notes below for more details. Returns ------- T : 1D ndarray The time values for the output. yout : ndarray The response of the system. xout : ndarray The time-evolution of the state-vector. Notes ----- This function uses :func:`scipy.integrate.odeint` to solve the system's differential equations. Additional keyword arguments given to `lsim2` are passed on to `odeint`. See the documentation for :func:`scipy.integrate.odeint` for the full list of arguments. """ if isinstance(system, lti): sys = system else: sys = lti(*system) if X0 is None: X0 = zeros(sys.B.shape[0],sys.A.dtype) if T is None: # XXX T should really be a required argument, but U was # changed from a required positional argument to a keyword, # and T is after U in the argument list. So we either: change # the API and move T in front of U; check here for T being # None and raise an excpetion; or assign a default value to T # here. This code implements the latter. T = linspace(0, 10.0, 101) T = atleast_1d(T) if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") if U is not None: U = atleast_1d(U) if len(U.shape) == 1: U = U.reshape(-1,1) sU = U.shape if sU[0] != len(T): raise ValueError("U must have the same number of rows " "as elements in T.") if sU[1] != sys.inputs: raise ValueError("The number of inputs in U (%d) is not " "compatible with the number of system " "inputs (%d)" % (sU[1], sys.inputs)) # Create a callable that uses linear interpolation to # calculate the input at any time. 
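        # The module-level `import scipy.interpolate as interpolate` near the
        # top of this file is commented out, so import it locally here;
        # without it this branch raises NameError before the input
        # interpolator can be built.
        import scipy.interpolate as interpolate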
ufunc = interpolate.interp1d(T, U, kind='linear', axis=0, bounds_error=False) def fprime(x, t, sys, ufunc): """The vector field of the linear system.""" return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t])))) xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs) yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U)) else: def fprime(x, t, sys): """The vector field of the linear system.""" return dot(sys.A,x) xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs) yout = dot(sys.C,transpose(xout)) return T, squeeze(transpose(yout)), xout def lsim(system, U, T, X0=None, interp=1): """ Simulate output of a continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like An input array describing the input at each time `T` (interpolation is assumed between given times). If there are multiple inputs, then each column of the rank-2 array represents an input. T : array_like The time steps at which the input is defined and at which the output is desired. X0 : The initial conditions on the state vector (zero by default). interp : {1, 0} Whether to use linear (1) or zero-order hold (0) interpolation. Returns ------- T : 1D ndarray Time values for the output. yout : 1D ndarray System response. xout : ndarray Time-evolution of the state-vector. """ # system is an lti system or a sequence # with 2 (num, den) # 3 (zeros, poles, gain) # 4 (A, B, C, D) # describing the system # U is an input vector at times T # if system describes multiple inputs # then U can be a rank-2 array with the number of columns # being the number of inputs if isinstance(system, lti): sys = system else: sys = lti(*system) U = atleast_1d(U) T = atleast_1d(T) if len(U.shape) == 1: U = U.reshape((U.shape[0],1)) sU = U.shape if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") if sU[0] != len(T): raise ValueError("U must have the same number of rows " "as elements in T.") if sU[1] != sys.inputs: raise ValueError("System does not define that many inputs.") if X0 is None: X0 = zeros(sys.B.shape[0], sys.A.dtype) xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype) xout[0] = X0 A = sys.A AT, BT = transpose(sys.A), transpose(sys.B) dt = T[1]-T[0] lam, v = linalg.eig(A) vt = transpose(v) vti = linalg.inv(vt) GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype) ATm1 = linalg.inv(AT) ATm2 = dot(ATm1,ATm1) I = eye(A.shape[0],dtype=A.dtype) GTmI = GT-I F1T = dot(dot(BT,GTmI),ATm1) if interp: F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1) for k in xrange(1,len(T)): dt1 = T[k] - T[k-1] if dt1 != dt: dt = dt1 GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype) GTmI = GT-I F1T = dot(dot(BT,GTmI),ATm1) if interp: F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1) xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T) if interp: xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T) yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C))) return T, squeeze(yout), squeeze(xout) def _default_response_times(A, n): """Compute a reasonable set of time samples for the response time. This function is used by `impulse`, `impulse2`, `step` and `step2` to compute the response time when the `T` argument to the function is None. Parameters ---------- A : ndarray The system matrix, which is square. n : int The number of time samples to generate. 
Returns ------- t : ndarray The 1-D array of length `n` of time samples at which the response is to be computed. """ # Create a reasonable time interval. This could use some more work. # For example, what is expected when the system is unstable? vals = linalg.eigvals(A) r = min(abs(real(vals))) if r == 0.0: r = 1.0 tc = 1.0 / r t = linspace(0.0, 7*tc, n) return t def impulse(system, X0=None, T=None, N=None): """Impulse response of continuous-time system. Parameters ---------- system : LTI class or tuple If specified as a tuple, the system is described as ``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``. X0 : array_like, optional Initial state-vector. Defaults to zero. T : array_like, optional Time points. Computed if not given. N : int, optional The number of time points to compute (if `T` is not given). Returns ------- T : ndarray A 1-D array of time points. yout : ndarray A 1-D array containing the impulse response of the system (except for singularities at zero). """ if isinstance(system, lti): sys = system else: sys = lti(*system) if X0 is None: B = sys.B else: B = sys.B + X0 if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) h = zeros(T.shape, sys.A.dtype) s,v = linalg.eig(sys.A) vi = linalg.inv(v) C = sys.C for k in range(len(h)): es = diag(numpy.exp(s*T[k])) eA = (dot(dot(v,es),vi)).astype(h.dtype) h[k] = squeeze(dot(dot(C,eA),B)) return T, h def impulse2(system, X0=None, T=None, N=None, **kwargs): """ Impulse response of a single-input, continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: 2 (num, den) 3 (zeros, poles, gain) 4 (A, B, C, D) T : 1-D array_like, optional The time steps at which the input is defined and at which the output is desired. If `T` is not given, the function will generate a set of time samples automatically. X0 : 1-D array_like, optional The initial condition of the state vector. Default: 0 (the zero vector). N : int, optional Number of time points to compute. Default: 100. kwargs : various types Additional keyword arguments are passed on to the function `scipy.signal.lsim2`, which in turn passes them on to `scipy.integrate.odeint`; see the latter's documentation for information about these arguments. Returns ------- T : ndarray The time values for the output. yout : ndarray The output response of the system. See Also -------- impulse, lsim2, integrate.odeint Notes ----- The solution is generated by calling `scipy.signal.lsim2`, which uses the differential equation solver `scipy.integrate.odeint`. .. versionadded:: 0.8.0 Examples -------- Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = impulse2(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y) """ if isinstance(system, lti): sys = system else: sys = lti(*system) B = sys.B if B.shape[-1] != 1: raise ValueError("impulse2() requires a single-input system.") B = B.squeeze() if X0 is None: X0 = zeros_like(B) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) # Move the impulse in the input to the initial conditions, and then # solve using lsim2(). U = zeros_like(T) ic = B + X0 Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs) return Tr, Yr def step(system, X0=None, T=None, N=None): """Step response of continuous-time system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. 
The following gives the number of elements in the tuple and the interpretation. 2 (num, den) 3 (zeros, poles, gain) 4 (A, B, C, D) X0 : array_like, optional Initial state-vector (default is zero). T : array_like, optional Time points (computed if not given). N : int Number of time points to compute if `T` is not given. Returns ------- T : 1D ndarray Output time points. yout : 1D ndarray Step response of system. See also -------- scipy.signal.step2 """ if isinstance(system, lti): sys = system else: sys = lti(*system) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) U = ones(T.shape, sys.A.dtype) vals = lsim(sys, U, T, X0=X0) return vals[0], vals[1] def step2(system, X0=None, T=None, N=None, **kwargs): """Step response of continuous-time system. This function is functionally the same as `scipy.signal.step`, but it uses the function `scipy.signal.lsim2` to compute the step response. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation. 2 (num, den) 3 (zeros, poles, gain) 4 (A, B, C, D) X0 : array_like, optional Initial state-vector (default is zero). T : array_like, optional Time points (computed if not given). N : int Number of time points to compute if `T` is not given. **kwargs : Additional keyword arguments are passed on the function `scipy.signal.lsim2`, which in turn passes them on to :func:`scipy.integrate.odeint`. See the documentation for :func:`scipy.integrate.odeint` for information about these arguments. Returns ------- T : 1D ndarray Output time points. yout : 1D ndarray Step response of system. See also -------- scipy.signal.step Notes ----- .. versionadded:: 0.8.0 """ if isinstance(system, lti): sys = system else: sys = lti(*system) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) U = ones(T.shape, sys.A.dtype) vals = lsim2(sys, U, T, X0=X0, **kwargs) return vals[0], vals[1]
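

# Illustrative usage sketch (not part of the original module): the step
# response of the first-order low-pass system H(s) = 1 / (s + 1), written in
# the (num, den) form accepted by `lti`, `step` and `lsim` above.
def _example_step_response():
    sys1 = lti([1.0], [1.0, 1.0])
    # Automatic time grid: y approaches 1 - exp(-t).
    t, y = step(sys1, N=100)
    # The same response via explicit simulation of a unit input.
    t2 = linspace(0.0, 7.0, 100)
    t2, y2, x2 = lsim(sys1, ones(t2.shape), t2)
    return t, y, t2, y2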
gpl-3.0
bobbymckinney/hall_measurement
program_roomtemp/checkIV.py
3
15744
#! /usr/bin/python # -*- coding: utf-8 -*- import os import matplotlib from matplotlib.figure import Figure from matplotlib.pyplot import gcf, setp import matplotlib.animation as animation # For plotting import pylab import numpy as np import matplotlib.pyplot as plt import visa # pyvisa, essential for communicating with the Keithley from threading import Thread # For threading the processes going on behind the GUI import time from datetime import datetime # for getting the current date and time current = .01 # (A) Current that is sourced by the k2400 # Global placers for instruments k2700 = '' k2400 = '' k2182 = '' ls643 = '' #ResourceManager for visa instrument control ResourceManager = visa.ResourceManager() ############################################################################### class Keithley_2700: ''' Used for the matrix card operations. ''' #-------------------------------------------------------------------------- def __init__(self, instr): self.ctrl = ResourceManager.open_resource(instr) self.openAllChannels() #end init #-------------------------------------------------------------------------- def closeChannels(self, channels): self.ctrl.write(":ROUT:MULT:CLOS (@ %s)" %channels) #end def #-------------------------------------------------------------------------- def openChannels(self, channels): self.ctrl.write(":ROUT:MULT:OPEN (@ %s)" %channels) #end def #-------------------------------------------------------------------------- def openAllChannels(self): self.ctrl.write("ROUTe:OPEN:ALL") #end def #-------------------------------------------------------------------------- def get_closedChannels(self): return self.ctrl.query(":ROUT:MULT:CLOS?") #end def #end class ############################################################################### ############################################################################### class Keithley_2400: ''' SourceMeter ''' #-------------------------------------------------------------------------- def __init__(self, instr): self.ctrl = ResourceManager.open_resource(instr) self.ctrl.write(":ROUT:TERM REAR") # Use the rear output terminals self.current_mode() self.set_current_range(10.5*10**(-3)) # Default self.set_current(float(current)) #end init #-------------------------------------------------------------------------- def turn_source_on(self): self.ctrl.write(":OUTPut:STATe ON") #end def #-------------------------------------------------------------------------- def turn_source_off(self): self.ctrl.write(":OUTPut:STATe OFF") #end def #-------------------------------------------------------------------------- def query_state(self): state = self.ctrl.query(":OUTPut:STATe?") if state == '1': amplitude = self.ctrl.query(":SOURce:CURRent:LEVel:IMMediate:AMPLitude?") + ' Amps' if amplitude == '0.000000E+00 Amps': amplitude = self.ctrl.query(":SOURce:VOLTage:LEVel:IMMediate:AMPLitude?") + ' Volts' return 'state: %s, amplitude: %s ' % (state, amplitude) else: return 'state: %s' % state #end def #-------------------------------------------------------------------------- def current_mode(self): self.ctrl.write(":SOURce:FUNCtion:MODE CURRent") self.ctrl.write(":SOURce:CURRent:MODE FIXed") # Fixed current mode #end def #-------------------------------------------------------------------------- def set_current(self, current): self.change_current_range(current) #time.sleep(5) self.ctrl.write(":SOURce:CURRent:LEVel:IMMediate:AMPLitude %f" % current) #end def #-------------------------------------------------------------------------- def 
change_current_range(self, current): #self.write(":SOURce:CURRent:LEVel:IMMediate:AMPLitude 0") if current > 0: if current > 105*10**(-3): self.set_current_range(1.05) else: if current > 10.5*10**(-3): self.set_current_range(105*10**(-3)) else: if current > 1.05*10**(-3): self.set_current_range(10.5*10**(-3)) else: if current > 105*10**(-6): self.set_current_range(1.05*10**(-3)) else: if current > 10.5*10**(-6): self.set_current_range(105*10**(-6)) else: if current > 1.05*10**(-6): self.set_current_range(10.5*10**(-6)) else: self.set_current_range(1.05*10**(-6)) elif current < 0: if current < -105*10**(-3): self.set_current_range(-1.05) else: if current < -10.5*10**(-3): self.set_current_range(-105*10**(-3)) else: if current < -1.05*10**(-3): self.set_current_range(-10.5*10**(-3)) else: if current < -105*10**(-6): self.set_current_range(-1.05*10**(-3)) else: if current < -10.5*10**(-6): self.set_current_range(-105*10**(-6)) else: if current < -1.05*10**(-6): self.set_current_range(-10.5*10**(-6)) else: self.set_current_range(-1.05*10**(-6)) else: self.set_current_range(1.05*10**(-6)) #end def #-------------------------------------------------------------------------- def set_current_range(self, current): self.ctrl.write(":SOURce:CURRent:RANGe %f" % current) #end def #-------------------------------------------------------------------------- def voltage_mode(self): self.ctrl.write(":SOURce:FUNCtion:MODE VOLTage") self.ctrl.write(":SOURce:VOLTage:MODE FIXed") # Fixed voltage mode #end def #-------------------------------------------------------------------------- def set_voltage(self, voltage): self.ctrl.write(":SOURce:VOLTage:LEVel:IMMediate:AMPLitude %f" % voltage) #end def #end class ############################################################################### ############################################################################### class Keithley_2182: ''' NanoVoltMeter ''' #-------------------------------------------------------------------------- def __init__(self, instr): self.ctrl = ResourceManager.open_resource(instr) self.ctrl.write(":TRIGger:SEQuence1:COUNt 1") self.ctrl.write(":TRIGger:SEQuence1:DELay 0") # Set count rate self.ctrl.write(":SENSe:FUNCtion VOLTage") self.ctrl.write(":SENS:VOLT:CHAN1:RANG:AUTO ON") self.ctrl.write(":SENSe1:VOLTage:DC:NPLCycles 5") # Sets integration period based on frequency #end init #-------------------------------------------------------------------------- def fetch(self): """ Scan the channel and take a reading """ #self.write(":ROUTe:SCAN:INTernal:CCOunt 1") # Specify number of readings on channel 1 self.ctrl.write(":SENSe:CHANnel 1") data = self.ctrl.query(":SENSe:DATA:FRESh?") #print str(data)[0:15] #print data return str(data)[0:15] # Fetches Reading #end def #end class ############################################################################### ############################################################################### class Setup: """ Call this class to run the setup for the Keithley and the PID. 
""" def __init__(self): """ Prepare the Keithley to take data on the specified channels: """ global k2700 global k2400 global k2182 # Define Keithley instrument ports: self.k2700 = k2700 = Keithley_2700('GPIB0::2::INSTR') # MultiMeter for Matrix Card operation self.k2400 = k2400 = Keithley_2400('GPIB0::3::INSTR') # SourceMeter self.k2182 = k2182 = Keithley_2182('GPIB0::4::INSTR') # NanoVoltMeter #end class ############################################################################### ############################################################################### class CheckIV: #-------------------------------------------------------------------------- def __init__(self): global k2700 global k2400 global k2182 global current self.k2700 = k2700 self.k2400 = k2400 self.k2182 = k2182 self.delay = .7 self.current = .01 #self.I = {} #self.V = {} #self.V_fit = {} self.Data = {} # short the matrix card self.k2700.closeChannels('117, 125, 126, 127, 128, 129, 130') print(self.k2700.get_closedChannels()) time.sleep(self.delay) self.measure_contacts() self.create_plot() #end init #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- def measure_contacts(self): # r_12 print('measure r_12') self.k2700.openChannels('126, 127, 128, 129, 130') print(self.k2700.get_closedChannels()) self.r_12 = self.checkIV('A','B') self.k2700.closeChannels('126, 127, 128, 129, 130') print(self.k2700.get_closedChannels()) print "r12: %f Ohm" % (self.r_12) time.sleep(self.delay) # r_13 print('measure r_13') self.k2700.closeChannels('119') print(self.k2700.get_closedChannels()) self.k2700.openChannels('117, 125, 126, 128, 129, 130') print(self.k2700.get_closedChannels()) self.r_13 = self.checkIV('A','C') self.k2700.closeChannels('117, 125, 126, 128, 129, 130') print(self.k2700.get_closedChannels()) self.k2700.openChannels('119') print(self.k2700.get_closedChannels()) print "r13: %f Ohm" % (self.r_13) time.sleep(self.delay) # r_24 print('measure r_24') self.k2700.closeChannels('120') print(self.k2700.get_closedChannels()) self.k2700.openChannels('117, 125, 126, 127, 129, 130') print(self.k2700.get_closedChannels()) self.r_24 = self.checkIV('B','D') self.k2700.closeChannels('117, 125, 126, 127, 129, 130') print(self.k2700.get_closedChannels()) self.k2700.openChannels('120') print(self.k2700.get_closedChannels()) print "r24: %f Ohm" % (self.r_24) time.sleep(self.delay) # r_34 print('measure r_34') self.k2700.closeChannels('118') print(self.k2700.get_closedChannels()) self.k2700.openChannels('117, 125, 127, 128, 129, 130') print(self.k2700.get_closedChannels()) self.r_34 = self.checkIV('C','D') self.k2700.closeChannels('117, 125, 127, 128, 129, 130') print(self.k2700.get_closedChannels()) self.k2700.openChannels('118') print(self.k2700.get_closedChannels()) print "r34: %f Ohm" % (self.r_34) #end def #-------------------------------------------------------------------------- #-------------------------------------------------------------------------- def checkIV(self,p1,p2): print('check IV') n = 2 I = [1000*self.current*(x)/n for x in range(-n,n+1)] V = [] for i in I: self.k2400.turn_source_on() self.k2400.set_current(float(i)/1000) time.sleep(self.delay) v = float( self.k2182.fetch() ) print 'i: %f\nv: %f'%(i,v) V.append(v) time.sleep(self.delay) #end for fit = self.polyfit(V,I,1) self.Data[p1+p2] = fit self.Data[p1+p2]['current'] = I self.Data[p1+p2]['voltage'] = V r = (fit['polynomial'][0])**-1 return r #end def 
#-------------------------------------------------------------------------- #-------------------------------------------------------------------------- def polyfit(self, x, y, degree): ''' Returns the polynomial fit for x and y of degree degree along with the r^2 and the temperature, all in dictionary form. ''' results = {} coeffs = np.polyfit(x, y, degree) # Polynomial Coefficients results['polynomial'] = coeffs.tolist() # Calculate coefficient of determination (r-squared): p = np.poly1d(coeffs) # fitted values: yhat = p(x) # or [p(z) for z in x] # mean of values: ybar = np.sum(y)/len(y) # or sum(y)/len(y) # regression sum of squares: ssreg = np.sum((yhat-ybar)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat]) # total sum of squares: sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y]) results['r-squared'] = ssreg / sstot return results #end def #-------------------------------------------------------------------------- def create_plot(self): plt.figure(num='IV Curves', figsize=(12,9),dpi=100) fitData = {} sp = 221 for key in self.Data.keys(): fitData[key] = {} i = np.poly1d(self.Data[key]['polynomial']) v = np.linspace(min(self.Data[key]['voltage']), max(self.Data[key]['voltage']), 500) fitData[key]['current'] = i(v) fitData[key]['voltage'] = v fitData[key]['equation'] = 'I = %.4f*(V) + %.4f' % (self.Data[key]['polynomial'][0], self.Data[key]['polynomial'][1]) plt.subplot(sp) sp = sp + 1 plt.plot(self.Data[key]['voltage'],self.Data[key]['current'],'r.',fitData[key]['voltage'],fitData[key]['current'],'b--') plt.xlabel("V (V)") plt.ylabel("I (mA)") plt.title('IV curve - '+key) plt.legend(('i-v data','fit: '+fitData[key]['equation']),loc=4,fontsize=10) #plt.axis([ , , , ]) plt.grid(True) #end for #fig.savefig('%s.png' % (plot_folder + title) , dpi=dpi) #plt.savefig('%s.png' % ('IV Curves') ) plt.show() #end def #end class ############################################################################### #============================================================================== def main(): sp = Setup() checkIV = CheckIV() #end def if __name__ == '__main__': main() #end if
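
#------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the fit-and-invert
# step that CheckIV.checkIV() performs, reproduced with np.polyfit on made-up
# numbers so it can run without any GPIB instruments attached.  Units follow
# the script's convention (V in volts, I in mA).
def _example_iv_fit():
    V = [-0.02, -0.01, 0.0, 0.01, 0.02]   # made-up voltages
    I = [10.0 * v for v in V]             # made-up currents, slope 10 mA/V
    slope, intercept = np.polyfit(V, I, 1)
    r = slope ** -1                       # same inversion as checkIV()
    return r
#end def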
gpl-3.0
walshjon/openmc
openmc/arithmetic.py
1
27765
import sys import copy from collections import Iterable import numpy as np import pandas as pd import openmc from openmc.filter import _FILTER_TYPES import openmc.checkvalue as cv # Acceptable tally arithmetic binary operations _TALLY_ARITHMETIC_OPS = ['+', '-', '*', '/', '^'] # Acceptable tally aggregation operations _TALLY_AGGREGATE_OPS = ['sum', 'avg'] class CrossScore(object): """A special-purpose tally score used to encapsulate all combinations of two tally's scores as an outer product for tally arithmetic. Parameters ---------- left_score : str or CrossScore The left score in the outer product right_score : str or CrossScore The right score in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) used to combine two tally's scores with this CrossNuclide Attributes ---------- left_score : str or CrossScore The left score in the outer product right_score : str or CrossScore The right score in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) used to combine two tally's scores with this CrossScore """ def __init__(self, left_score=None, right_score=None, binary_op=None): self._left_score = None self._right_score = None self._binary_op = None if left_score is not None: self.left_score = left_score if right_score is not None: self.right_score = right_score if binary_op is not None: self.binary_op = binary_op def __hash__(self): return hash(repr(self)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __repr__(self): string = '({0} {1} {2})'.format(self.left_score, self.binary_op, self.right_score) return string @property def left_score(self): return self._left_score @property def right_score(self): return self._right_score @property def binary_op(self): return self._binary_op @left_score.setter def left_score(self, left_score): cv.check_type('left_score', left_score, (str, CrossScore, AggregateScore)) self._left_score = left_score @right_score.setter def right_score(self, right_score): cv.check_type('right_score', right_score, (str, CrossScore, AggregateScore)) self._right_score = right_score @binary_op.setter def binary_op(self, binary_op): cv.check_type('binary_op', binary_op, str) cv.check_value('binary_op', binary_op, _TALLY_ARITHMETIC_OPS) self._binary_op = binary_op class CrossNuclide(object): """A special-purpose nuclide used to encapsulate all combinations of two tally's nuclides as an outer product for tally arithmetic. Parameters ---------- left_nuclide : openmc.Nuclide or CrossNuclide The left nuclide in the outer product right_nuclide : openmc.Nuclide or CrossNuclide The right nuclide in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) used to combine two tally's nuclides with this CrossNuclide Attributes ---------- left_nuclide : openmc.Nuclide or CrossNuclide The left nuclide in the outer product right_nuclide : openmc.Nuclide or CrossNuclide The right nuclide in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) 
used to combine two tally's nuclides with this CrossNuclide """ def __init__(self, left_nuclide=None, right_nuclide=None, binary_op=None): self._left_nuclide = None self._right_nuclide = None self._binary_op = None if left_nuclide is not None: self.left_nuclide = left_nuclide if right_nuclide is not None: self.right_nuclide = right_nuclide if binary_op is not None: self.binary_op = binary_op def __hash__(self): return hash(repr(self)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __repr__(self): return self.name @property def left_nuclide(self): return self._left_nuclide @property def right_nuclide(self): return self._right_nuclide @property def binary_op(self): return self._binary_op @property def name(self): string = '' # If the Summary was linked, the left nuclide is a Nuclide object if isinstance(self.left_nuclide, openmc.Nuclide): string += '(' + self.left_nuclide.name # If the Summary was not linked, the left nuclide is the ZAID else: string += '(' + str(self.left_nuclide) string += ' ' + self.binary_op + ' ' # If the Summary was linked, the right nuclide is a Nuclide object if isinstance(self.right_nuclide, openmc.Nuclide): string += self.right_nuclide.name + ')' # If the Summary was not linked, the right nuclide is the ZAID else: string += str(self.right_nuclide) + ')' return string @left_nuclide.setter def left_nuclide(self, left_nuclide): cv.check_type('left_nuclide', left_nuclide, (openmc.Nuclide, CrossNuclide, AggregateNuclide)) self._left_nuclide = left_nuclide @right_nuclide.setter def right_nuclide(self, right_nuclide): cv.check_type('right_nuclide', right_nuclide, (openmc.Nuclide, CrossNuclide, AggregateNuclide)) self._right_nuclide = right_nuclide @binary_op.setter def binary_op(self, binary_op): cv.check_type('binary_op', binary_op, str) cv.check_value('binary_op', binary_op, _TALLY_ARITHMETIC_OPS) self._binary_op = binary_op class CrossFilter(object): """A special-purpose filter used to encapsulate all combinations of two tally's filter bins as an outer product for tally arithmetic. Parameters ---------- left_filter : Filter or CrossFilter The left filter in the outer product right_filter : Filter or CrossFilter The right filter in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) used to combine two tally's filter bins with this CrossFilter Attributes ---------- type : str The type of the crossfilter (e.g., 'energy / energy') left_filter : Filter or CrossFilter The left filter in the outer product right_filter : Filter or CrossFilter The right filter in the outer product binary_op : str The tally arithmetic binary operator (e.g., '+', '-', etc.) 
used to combine two tally's filter bins with this CrossFilter bins : dict of Iterable A dictionary of the bins from each filter keyed by the types of the left / right filters num_bins : Integral The number of filter bins (always 1 if aggregate_filter is defined) """ def __init__(self, left_filter=None, right_filter=None, binary_op=None): left_type = left_filter.type right_type = right_filter.type self._type = '({0} {1} {2})'.format(left_type, binary_op, right_type) self._bins = {} self._left_filter = None self._right_filter = None self._binary_op = None if left_filter is not None: self.left_filter = left_filter self._bins['left'] = left_filter.bins if right_filter is not None: self.right_filter = right_filter self._bins['right'] = right_filter.bins if binary_op is not None: self.binary_op = binary_op def __hash__(self): return hash((self.left_filter, self.right_filter)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __repr__(self): string = 'CrossFilter\n' filter_type = '({0} {1} {2})'.format(self.left_filter.type, self.binary_op, self.right_filter.type) filter_bins = '({0} {1} {2})'.format(self.left_filter.bins, self.binary_op, self.right_filter.bins) string += '{0: <16}{1}{2}\n'.format('\tType', '=\t', filter_type) string += '{0: <16}{1}{2}\n'.format('\tBins', '=\t', filter_bins) return string @property def left_filter(self): return self._left_filter @property def right_filter(self): return self._right_filter @property def binary_op(self): return self._binary_op @property def type(self): return self._type @property def bins(self): return self._bins['left'], self._bins['right'] @property def num_bins(self): if self.left_filter is not None and self.right_filter is not None: return self.left_filter.num_bins * self.right_filter.num_bins else: return 0 @type.setter def type(self, filter_type): if filter_type not in _FILTER_TYPES: msg = 'Unable to set CrossFilter type to "{0}" since it ' \ 'is not one of the supported types'.format(filter_type) raise ValueError(msg) self._type = filter_type @left_filter.setter def left_filter(self, left_filter): cv.check_type('left_filter', left_filter, (openmc.Filter, CrossFilter, AggregateFilter)) self._left_filter = left_filter self._bins['left'] = left_filter.bins @right_filter.setter def right_filter(self, right_filter): cv.check_type('right_filter', right_filter, (openmc.Filter, CrossFilter, AggregateFilter)) self._right_filter = right_filter self._bins['right'] = right_filter.bins @binary_op.setter def binary_op(self, binary_op): cv.check_type('binary_op', binary_op, str) cv.check_value('binary_op', binary_op, _TALLY_ARITHMETIC_OPS) self._binary_op = binary_op def get_bin_index(self, filter_bin): """Returns the index in the CrossFilter for some bin. Parameters ---------- filter_bin : 2-tuple A 2-tuple where each value corresponds to the bin of interest in the left and right filter, respectively. A bin is the integer ID for 'material', 'surface', 'cell', 'cellborn', and 'universe' Filters. The bin is an integer for the cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of floats for 'energy' and 'energyout' filters corresponding to the energy boundaries of the bin of interest. The bin is a (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of interest. Returns ------- filter_index : Integral The index in the Tally data array for this filter bin. 
""" left_index = self.left_filter.get_bin_index(filter_bin[0]) right_index = self.right_filter.get_bin_index(filter_bin[0]) filter_index = left_index * self.right_filter.num_bins + right_index return filter_index def get_pandas_dataframe(self, data_size, summary=None): """Builds a Pandas DataFrame for the CrossFilter's bins. This method constructs a Pandas DataFrame object for the CrossFilter with columns annotated by filter bin information. This is a helper method for the Tally.get_pandas_dataframe(...) method. This method recursively builds and concatenates Pandas DataFrames for the left and right filters and crossfilters. This capability has been tested for Pandas >=0.13.1. However, it is recommended to use v0.16 or newer versions of Pandas since this method uses Pandas' Multi-index functionality. Parameters ---------- data_size : Integral The total number of bins in the tally corresponding to this filter summary : None or Summary An optional Summary object to be used to construct columns for distribcell tally filters (default is None). The geometric information in the Summary object is embedded into a Multi-index column with a geometric "path" to each distribcell instance. Returns ------- pandas.DataFrame A Pandas DataFrame with columns of strings that characterize the crossfilter's bins. Each entry in the DataFrame will include one or more binary operations used to construct the crossfilter's bins. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bins appropriately tiled to map to the corresponding tally bins. See also -------- Tally.get_pandas_dataframe(), Filter.get_pandas_dataframe() """ # If left and right filters are identical, do not combine bins if self.left_filter == self.right_filter: df = self.left_filter.get_pandas_dataframe(data_size, summary) # If left and right filters are different, combine their bins else: left_df = self.left_filter.get_pandas_dataframe(data_size, summary) right_df = self.right_filter.get_pandas_dataframe(data_size, summary) left_df = left_df.astype(str) right_df = right_df.astype(str) df = '(' + left_df + ' ' + self.binary_op + ' ' + right_df + ')' return df class AggregateScore(object): """A special-purpose tally score used to encapsulate an aggregate of a subset or all of tally's scores for tally aggregation. Parameters ---------- scores : Iterable of str or CrossScore The scores included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) used to aggregate across a tally's scores with this AggregateScore Attributes ---------- scores : Iterable of str or CrossScore The scores included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) 
used to aggregate across a tally's scores with this AggregateScore """ def __init__(self, scores=None, aggregate_op=None): self._scores = None self._aggregate_op = None if scores is not None: self.scores = scores if aggregate_op is not None: self.aggregate_op = aggregate_op def __hash__(self): return hash(repr(self)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __repr__(self): string = ', '.join(map(str, self.scores)) string = '{0}({1})'.format(self.aggregate_op, string) return string @property def scores(self): return self._scores @property def aggregate_op(self): return self._aggregate_op @property def name(self): # Append each score in the aggregate to the string string = '(' + ', '.join(self.scores) + ')' return string @scores.setter def scores(self, scores): cv.check_iterable_type('scores', scores, str) self._scores = scores @aggregate_op.setter def aggregate_op(self, aggregate_op): cv.check_type('aggregate_op', aggregate_op, (str, CrossScore)) cv.check_value('aggregate_op', aggregate_op, _TALLY_AGGREGATE_OPS) self._aggregate_op = aggregate_op class AggregateNuclide(object): """A special-purpose tally nuclide used to encapsulate an aggregate of a subset or all of tally's nuclides for tally aggregation. Parameters ---------- nuclides : Iterable of str or openmc.Nuclide or CrossNuclide The nuclides included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) used to aggregate across a tally's nuclides with this AggregateNuclide Attributes ---------- nuclides : Iterable of str or openmc.Nuclide or CrossNuclide The nuclides included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) used to aggregate across a tally's nuclides with this AggregateNuclide """ def __init__(self, nuclides=None, aggregate_op=None): self._nuclides = None self._aggregate_op = None if nuclides is not None: self.nuclides = nuclides if aggregate_op is not None: self.aggregate_op = aggregate_op def __hash__(self): return hash(repr(self)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __repr__(self): # Append each nuclide in the aggregate to the string string = '{0}('.format(self.aggregate_op) names = [nuclide.name if isinstance(nuclide, openmc.Nuclide) else str(nuclide) for nuclide in self.nuclides] string += ', '.join(map(str, names)) + ')' return string @property def nuclides(self): return self._nuclides @property def aggregate_op(self): return self._aggregate_op @property def name(self): # Append each nuclide in the aggregate to the string names = [nuclide.name if isinstance(nuclide, openmc.Nuclide) else str(nuclide) for nuclide in self.nuclides] string = '(' + ', '.join(map(str, names)) + ')' return string @nuclides.setter def nuclides(self, nuclides): cv.check_iterable_type('nuclides', nuclides, (str, CrossNuclide)) self._nuclides = nuclides @aggregate_op.setter def aggregate_op(self, aggregate_op): cv.check_type('aggregate_op', aggregate_op, str) cv.check_value('aggregate_op', aggregate_op, _TALLY_AGGREGATE_OPS) self._aggregate_op = aggregate_op class AggregateFilter(object): """A special-purpose tally filter used to encapsulate an aggregate of a subset or all of a tally filter's bins for tally aggregation. 
Parameters ---------- aggregate_filter : Filter or CrossFilter The filter included in the aggregation bins : Iterable of tuple The filter bins included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) used to aggregate across a tally filter's bins with this AggregateFilter Attributes ---------- type : str The type of the aggregatefilter (e.g., 'sum(energy)', 'sum(cell)') aggregate_filter : filter The filter included in the aggregation aggregate_op : str The tally aggregation operator (e.g., 'sum', 'avg', etc.) used to aggregate across a tally filter's bins with this AggregateFilter bins : Iterable of tuple The filter bins included in the aggregation num_bins : Integral The number of filter bins (always 1 if aggregate_filter is defined) """ def __init__(self, aggregate_filter=None, bins=None, aggregate_op=None): self._type = '{0}({1})'.format(aggregate_op, aggregate_filter.short_name.lower()) self._bins = None self._aggregate_filter = None self._aggregate_op = None if aggregate_filter is not None: self.aggregate_filter = aggregate_filter if bins is not None: self.bins = bins if aggregate_op is not None: self.aggregate_op = aggregate_op def __hash__(self): return hash(repr(self)) def __eq__(self, other): return str(other) == str(self) def __ne__(self, other): return not self == other def __gt__(self, other): if self.type != other.type: if self.aggregate_filter.type in _FILTER_TYPES and \ other.aggregate_filter.type in _FILTER_TYPES: delta = _FILTER_TYPES.index(self.aggregate_filter.type) - \ _FILTER_TYPES.index(other.aggregate_filter.type) return delta > 0 else: return False else: return False def __lt__(self, other): return not self > other def __repr__(self): string = 'AggregateFilter\n' string += '{0: <16}{1}{2}\n'.format('\tType', '=\t', self.type) string += '{0: <16}{1}{2}\n'.format('\tBins', '=\t', self.bins) return string @property def aggregate_filter(self): return self._aggregate_filter @property def aggregate_op(self): return self._aggregate_op @property def type(self): return self._type @property def bins(self): return self._bins @property def num_bins(self): return len(self.bins) if self.aggregate_filter else 0 @type.setter def type(self, filter_type): if filter_type not in _FILTER_TYPES: msg = 'Unable to set AggregateFilter type to "{0}" since it ' \ 'is not one of the supported types'.format(filter_type) raise ValueError(msg) self._type = filter_type @aggregate_filter.setter def aggregate_filter(self, aggregate_filter): cv.check_type('aggregate_filter', aggregate_filter, (openmc.Filter, CrossFilter)) self._aggregate_filter = aggregate_filter @bins.setter def bins(self, bins): cv.check_iterable_type('bins', bins, Iterable) self._bins = list(map(tuple, bins)) @aggregate_op.setter def aggregate_op(self, aggregate_op): cv.check_type('aggregate_op', aggregate_op, str) cv.check_value('aggregate_op', aggregate_op, _TALLY_AGGREGATE_OPS) self._aggregate_op = aggregate_op def get_bin_index(self, filter_bin): """Returns the index in the AggregateFilter for some bin. Parameters ---------- filter_bin : Integral or tuple of Real A tuple of value(s) corresponding to the bin of interest in the aggregated filter. The bin is the integer ID for 'material', 'surface', 'cell', 'cellborn', and 'universe' Filters. The bin is the integer cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of floats for 'energy' and 'energyout' filters corresponding to the energy boundaries of the bin of interest. 
The bin is a (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of interest. Returns ------- filter_index : Integral The index in the Tally data array for this filter bin. For an AggregateTally the filter bin index is always unity. Raises ------ ValueError When the filter_bin is not part of the aggregated filter's bins """ if filter_bin not in self.bins: msg = 'Unable to get the bin index for AggregateFilter since ' \ '"{0}" is not one of the bins'.format(filter_bin) raise ValueError(msg) else: return self.bins.index(filter_bin) def get_pandas_dataframe(self, data_size, stride, summary=None, **kwargs): """Builds a Pandas DataFrame for the AggregateFilter's bins. This method constructs a Pandas DataFrame object for the AggregateFilter with columns annotated by filter bin information. This is a helper method for the Tally.get_pandas_dataframe(...) method. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter summary : None or Summary An optional Summary object to be used to construct columns for distribcell tally filters (default is None). NOTE: This parameter is not used by the AggregateFilter and simply mirrors the method signature for the CrossFilter. Returns ------- pandas.DataFrame A Pandas DataFrame with columns of strings that characterize the aggregatefilter's bins. Each entry in the DataFrame will include one or more aggregation operations used to construct the aggregatefilter's bins. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bins appropriately tiled to map to the corresponding tally bins. See also -------- Tally.get_pandas_dataframe(), Filter.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Create NumPy array of the bin tuples for repeating / tiling filter_bins = np.empty(self.num_bins, dtype=tuple) for i, bin in enumerate(self.bins): filter_bins[i] = bin # Repeat and tile bins as needed for DataFrame filter_bins = np.repeat(filter_bins, stride) tile_factor = data_size / len(filter_bins) filter_bins = np.tile(filter_bins, tile_factor) # Create DataFrame with aggregated bins df = pd.DataFrame({self.type: filter_bins}) return df def can_merge(self, other): """Determine if AggregateFilter can be merged with another. Parameters ---------- other : AggregateFilter Filter to compare with Returns ------- bool Whether the filter can be merged """ if not isinstance(other, AggregateFilter): return False # Filters must be of the same type elif self.type != other.type: return False # None of the bins in this filter should match in the other filter for bin in self.bins: if bin in other.bins: return False # If all conditional checks passed then filters are mergeable return True def merge(self, other): """Merge this aggregatefilter with another. Parameters ---------- other : AggregateFilter Filter to merge with Returns ------- merged_filter : AggregateFilter Filter resulting from the merge """ if not self.can_merge(other): msg = 'Unable to merge "{0}" with "{1}" ' \ 'filters'.format(self.type, other.type) raise ValueError(msg) # Create deep copy of filter to return as merged filter merged_filter = copy.deepcopy(self) # Merge unique filter bins merged_bins = self.bins + other.bins # Sort energy bin edges if 'energy' in self.type: merged_bins = sorted(merged_bins) # Assign merged bins to merged filter merged_filter.bins = list(merged_bins) return merged_filter
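

# Illustrative sketch (not part of the original module): how the score
# wrappers above render themselves when tallies are combined.  Only plain
# score strings are used, so this runs without any openmc tally data.
def _example_score_arithmetic():
    crossed = CrossScore('flux', 'fission', '/')
    combined = AggregateScore(['absorption', 'scatter'], 'sum')
    # repr(crossed)  -> '(flux / fission)'
    # repr(combined) -> 'sum(absorption, scatter)'
    return str(crossed), str(combined)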
mit
mindriot101/bokeh
bokeh/plotting/helpers.py
3
30431
from __future__ import absolute_import from collections import Iterable, OrderedDict, Sequence import difflib import itertools import re import textwrap import warnings import numpy as np import sys from six import string_types, reraise from ..models import ( BoxSelectTool, BoxZoomTool, CategoricalAxis, MercatorAxis, TapTool, CrosshairTool, DataRange1d, DatetimeAxis, FactorRange, Grid, HelpTool, HoverTool, LassoSelectTool, Legend, LegendItem, LinearAxis, LogAxis, PanTool, ZoomInTool, ZoomOutTool, PolySelectTool, ContinuousTicker, SaveTool, Range, Range1d, UndoTool, RedoTool, ResetTool, Tool, WheelPanTool, WheelZoomTool, ColumnarDataSource, ColumnDataSource, LogScale, LinearScale, CategoricalScale, Circle, MultiLine, BoxEditTool, PointDrawTool, PolyDrawTool, PolyEditTool) from bokeh.models.markers import Marker from ..models.renderers import GlyphRenderer from ..core.properties import ColorSpec, Datetime, value, field from ..transform import stack from ..util.dependencies import import_optional from ..util.string import nice_join pd = import_optional('pandas') def _stack(stackers, spec0, spec1, **kw): for name in (spec0, spec1): if name in kw: raise ValueError("Stack property '%s' cannot appear in keyword args" % name) lengths = { len(x) for x in kw.values() if isinstance(x, (list, tuple)) } # lengths will be empty if there are no kwargs supplied at all if len(lengths) > 0: if len(lengths) != 1: raise ValueError("Keyword argument sequences for broadcasting must all be the same lengths. Got lengths: %r" % sorted(list(lengths))) if lengths.pop() != len(stackers): raise ValueError("Keyword argument sequences for broadcasting must be the same length as stackers") s0 = [] s1 = [] _kw = [] for i, val in enumerate(stackers): d = {'name': val} s0 = list(s1) s1.append(val) d[spec0] = stack(*s0) d[spec1] = stack(*s1) for k, v in kw.items(): if isinstance(v, (list, tuple)): d[k] = v[i] else: d[k] = v _kw.append(d) return _kw def _graph(node_source, edge_source, **kwargs): if not isinstance(node_source, ColumnarDataSource): try: # try converting the soruce to ColumnDataSource node_source = ColumnDataSource(node_source) except ValueError as err: msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format( curr_type=str(type(node_source)), err=err.message ) reraise(ValueError, ValueError(msg), sys.exc_info()[2]) if not isinstance(edge_source, ColumnarDataSource): try: # try converting the soruce to ColumnDataSource edge_source = ColumnDataSource(edge_source) except ValueError as err: msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format( curr_type=str(type(edge_source)), err=err.message ) reraise(ValueError, ValueError(msg), sys.exc_info()[2]) ## node stuff if any(x.startswith('node_selection_') for x in kwargs): snode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_selection_") else: snode_ca = None if any(x.startswith('node_hover_') for x in kwargs): hnode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_hover_") else: hnode_ca = None if any(x.startswith('node_muted_') for x in kwargs): mnode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_muted_") else: mnode_ca = None nsnode_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_nonselection_") node_ca = _pop_colors_and_alpha(Circle, kwargs, prefix="node_") ## edge stuff if any(x.startswith('edge_selection_') for x in kwargs): sedge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_selection_") else: sedge_ca = None if 
any(x.startswith('edge_hover_') for x in kwargs): hedge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_hover_") else: hedge_ca = None if any(x.startswith('edge_muted_') for x in kwargs): medge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_muted_") else: medge_ca = None nsedge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_nonselection_") edge_ca = _pop_colors_and_alpha(MultiLine, kwargs, prefix="edge_") ## node stuff node_kwargs = {k.lstrip('node_'): v for k, v in kwargs.copy().items() if k.lstrip('node_') in Circle.properties()} node_glyph = _make_glyph(Circle, node_kwargs, node_ca) nsnode_glyph = _make_glyph(Circle, node_kwargs, nsnode_ca) snode_glyph = _make_glyph(Circle, node_kwargs, snode_ca) hnode_glyph = _make_glyph(Circle, node_kwargs, hnode_ca) mnode_glyph = _make_glyph(Circle, node_kwargs, mnode_ca) node_renderer = GlyphRenderer(glyph=node_glyph, nonselection_glyph=nsnode_glyph, selection_glyph=snode_glyph, hover_glyph=hnode_glyph, muted_glyph=mnode_glyph, data_source=node_source) ## edge stuff edge_kwargs = {k.lstrip('edge_'): v for k, v in kwargs.copy().items() if k.lstrip('edge_') in MultiLine.properties()} edge_glyph = _make_glyph(MultiLine, edge_kwargs, edge_ca) nsedge_glyph = _make_glyph(MultiLine, edge_kwargs, nsedge_ca) sedge_glyph = _make_glyph(MultiLine, edge_kwargs, sedge_ca) hedge_glyph = _make_glyph(MultiLine, edge_kwargs, hedge_ca) medge_glyph = _make_glyph(MultiLine, edge_kwargs, medge_ca) edge_renderer = GlyphRenderer(glyph=edge_glyph, nonselection_glyph=nsedge_glyph, selection_glyph=sedge_glyph, hover_glyph=hedge_glyph, muted_glyph=medge_glyph, data_source=edge_source) _RENDERER_ARGS = ['name', 'level', 'visible', 'x_range_name', 'y_range_name', 'selection_policy', 'inspection_policy'] renderer_kwargs = {attr: kwargs.pop(attr) for attr in _RENDERER_ARGS if attr in kwargs} renderer_kwargs["node_renderer"] = node_renderer renderer_kwargs["edge_renderer"] = edge_renderer return renderer_kwargs def get_default_color(plot=None): colors = [ "#1f77b4", "#ff7f0e", "#ffbb78", "#2ca02c", "#98df8a", "#d62728", "#ff9896", "#9467bd", "#c5b0d5", "#8c564b", "#c49c94", "#e377c2", "#f7b6d2", "#7f7f7f", "#bcbd22", "#dbdb8d", "#17becf", "#9edae5" ] if plot: renderers = plot.renderers renderers = [x for x in renderers if x.__view_model__ == "GlyphRenderer"] num_renderers = len(renderers) return colors[num_renderers] else: return colors[0] _RENDERER_ARGS = ['name', 'x_range_name', 'y_range_name', 'level', 'view', 'visible', 'muted'] def _pop_renderer_args(kwargs): result = {attr: kwargs.pop(attr) for attr in _RENDERER_ARGS if attr in kwargs} result['data_source'] = kwargs.pop('source', ColumnDataSource()) return result def _pop_colors_and_alpha(glyphclass, kwargs, prefix="", default_alpha=1.0): """ Given a kwargs dict, a prefix, and a default value, looks for different color and alpha fields of the given prefix, and fills in the default value if it doesn't exist. """ result = dict() # TODO: The need to do this and the complexity of managing this kind of # thing throughout the codebase really suggests that we need to have # a real stylesheet class, where defaults and Types can declaratively # substitute for this kind of imperative logic. 
color = kwargs.pop(prefix + "color", get_default_color()) for argname in ("fill_color", "line_color"): if argname not in glyphclass.properties(): continue result[argname] = kwargs.pop(prefix + argname, color) # NOTE: text fill color should really always default to black, hard coding # this here now until the stylesheet solution exists if "text_color" in glyphclass.properties(): result["text_color"] = kwargs.pop(prefix + "text_color", "black") alpha = kwargs.pop(prefix + "alpha", default_alpha) for argname in ("fill_alpha", "line_alpha", "text_alpha"): if argname not in glyphclass.properties(): continue result[argname] = kwargs.pop(prefix + argname, alpha) return result def _get_legend_item_label(kwargs): legend = kwargs.pop('legend', None) source = kwargs.get('source') legend_item_label = None if legend: if isinstance(legend, string_types): # Do the simple thing first legend_item_label = value(legend) # But if there's a source - try and do something smart if source is not None and hasattr(source, 'column_names'): if legend in source.column_names: legend_item_label = field(legend) else: legend_item_label = legend return legend_item_label _GLYPH_SOURCE_MSG = """ Expected %s to reference fields in the supplied data source. When a 'source' argument is passed to a glyph method, values that are sequences (like lists or arrays) must come from references to data columns in the source. For instance, as an example: source = ColumnDataSource(data=dict(x=a_list, y=an_array)) p.circle(x='x', y='y', source=source, ...) # pass column names and a source Alternatively, *all* data sequences may be provided as literals as long as a source is *not* provided: p.circle(x=a_list, y=an_array, ...) # pass actual sequences and no source """ def _process_sequence_literals(glyphclass, kwargs, source, is_user_source): incompatible_literal_spec_values = [] dataspecs = glyphclass.dataspecs_with_props() for var, val in kwargs.items(): # ignore things that are not iterable if not isinstance(val, Iterable): continue # pass dicts (i.e., values or fields) on as-is if isinstance(val, dict): continue # let any non-dataspecs do their own validation (e.g., line_dash properties) if var not in dataspecs: continue # strings sequences are handled by the dataspec as-is if isinstance(val, string_types): continue # similarly colorspecs handle color tuple sequences as-is if (isinstance(dataspecs[var].property, ColorSpec) and isinstance(val, tuple)): continue if isinstance(val, np.ndarray) and val.ndim != 1: raise RuntimeError("Columns need to be 1D (%s is not)" % var) if is_user_source: incompatible_literal_spec_values.append(var) else: source.add(val, name=var) kwargs[var] = var return incompatible_literal_spec_values def _make_glyph(glyphclass, kws, extra): if extra is None: return None kws = kws.copy() kws.update(extra) return glyphclass(**kws) def _update_legend(plot, legend_item_label, glyph_renderer): # Get the plot's legend legends = plot.select(type=Legend) if not legends: legend = Legend() plot.add_layout(legend) elif len(legends) == 1: legend = legends[0] else: raise RuntimeError("Plot %s configured with more than one legend renderer" % plot) # If there is an existing legend with a matching label, then put the # renderer on that (if the source matches). Otherwise add a new one. 
added = False for item in legend.items: if item.label == legend_item_label: if item.label.get('value'): item.renderers.append(glyph_renderer) added = True break if item.label.get('field') and \ glyph_renderer.data_source is item.renderers[0].data_source: item.renderers.append(glyph_renderer) added = True break if not added: new_item = LegendItem(label=legend_item_label, renderers=[glyph_renderer]) legend.items.append(new_item) def _get_range(range_input): if range_input is None: return DataRange1d() if pd and isinstance(range_input, pd.core.groupby.GroupBy): return FactorRange(factors=sorted(list(range_input.groups.keys()))) if isinstance(range_input, Range): return range_input if pd and isinstance(range_input, pd.Series): range_input = range_input.values if isinstance(range_input, (Sequence, np.ndarray)): if all(isinstance(x, string_types) for x in range_input): return FactorRange(factors=list(range_input)) if len(range_input) == 2: try: return Range1d(start=range_input[0], end=range_input[1]) except ValueError: # @mattpap suggests ValidationError instead pass raise ValueError("Unrecognized range input: '%s'" % str(range_input)) def _get_scale(range_input, axis_type): if isinstance(range_input, (DataRange1d, Range1d)) and axis_type in ["linear", "datetime", "mercator", "auto", None]: return LinearScale() elif isinstance(range_input, (DataRange1d, Range1d)) and axis_type == "log": return LogScale() elif isinstance(range_input, FactorRange): return CategoricalScale() else: raise ValueError("Unable to determine proper scale for: '%s'" % str(range_input)) def _get_axis_class(axis_type, range_input, dim): if axis_type is None: return None, {} elif axis_type == "linear": return LinearAxis, {} elif axis_type == "log": return LogAxis, {} elif axis_type == "datetime": return DatetimeAxis, {} elif axis_type == "mercator": return MercatorAxis, {'dimension': 'lon' if dim == 0 else 'lat'} elif axis_type == "auto": if isinstance(range_input, FactorRange): return CategoricalAxis, {} elif isinstance(range_input, Range1d): try: # Easier way to validate type of Range1d parameters Datetime.validate(Datetime(), range_input.start) return DatetimeAxis, {} except ValueError: pass return LinearAxis, {} else: raise ValueError("Unrecognized axis_type: '%r'" % axis_type) def _get_num_minor_ticks(axis_class, num_minor_ticks): if isinstance(num_minor_ticks, int): if num_minor_ticks <= 1: raise ValueError("num_minor_ticks must be > 1") return num_minor_ticks if num_minor_ticks is None: return 0 if num_minor_ticks == 'auto': if axis_class is LogAxis: return 10 return 5 _known_tools = { "pan": lambda: PanTool(dimensions='both'), "xpan": lambda: PanTool(dimensions='width'), "ypan": lambda: PanTool(dimensions='height'), "xwheel_pan": lambda: WheelPanTool(dimension="width"), "ywheel_pan": lambda: WheelPanTool(dimension="height"), "wheel_zoom": lambda: WheelZoomTool(dimensions='both'), "xwheel_zoom": lambda: WheelZoomTool(dimensions='width'), "ywheel_zoom": lambda: WheelZoomTool(dimensions='height'), "zoom_in": lambda: ZoomInTool(dimensions='both'), "xzoom_in": lambda: ZoomInTool(dimensions='width'), "yzoom_in": lambda: ZoomInTool(dimensions='height'), "zoom_out": lambda: ZoomOutTool(dimensions='both'), "xzoom_out": lambda: ZoomOutTool(dimensions='width'), "yzoom_out": lambda: ZoomOutTool(dimensions='height'), "click": lambda: TapTool(behavior="inspect"), "tap": lambda: TapTool(), "crosshair": lambda: CrosshairTool(), "box_select": lambda: BoxSelectTool(), "xbox_select": lambda: BoxSelectTool(dimensions='width'), 
"ybox_select": lambda: BoxSelectTool(dimensions='height'), "poly_select": lambda: PolySelectTool(), "lasso_select": lambda: LassoSelectTool(), "box_zoom": lambda: BoxZoomTool(dimensions='both'), "xbox_zoom": lambda: BoxZoomTool(dimensions='width'), "ybox_zoom": lambda: BoxZoomTool(dimensions='height'), "hover": lambda: HoverTool(tooltips=[ ("index", "$index"), ("data (x, y)", "($x, $y)"), ("screen (x, y)", "($sx, $sy)"), ]), "save": lambda: SaveTool(), "previewsave": "save", "undo": lambda: UndoTool(), "redo": lambda: RedoTool(), "reset": lambda: ResetTool(), "help": lambda: HelpTool(), "box_edit": lambda: BoxEditTool(), "point_draw": lambda: PointDrawTool(), "poly_draw": lambda: PolyDrawTool(), "poly_edit": lambda: PolyEditTool() } def _tool_from_string(name): """ Takes a string and returns a corresponding `Tool` instance. """ known_tools = sorted(_known_tools.keys()) if name in known_tools: tool_fn = _known_tools[name] if isinstance(tool_fn, string_types): tool_fn = _known_tools[tool_fn] return tool_fn() else: matches, text = difflib.get_close_matches(name.lower(), known_tools), "similar" if not matches: matches, text = known_tools, "possible" raise ValueError("unexpected tool name '%s', %s tools are %s" % (name, text, nice_join(matches))) def _process_axis_and_grid(plot, axis_type, axis_location, minor_ticks, axis_label, rng, dim): axiscls, axiskw = _get_axis_class(axis_type, rng, dim) if axiscls: # this is so we can get a ticker off the axis, even if we discard it axis = axiscls(plot=plot if axis_location else None, **axiskw) if isinstance(axis.ticker, ContinuousTicker): axis.ticker.num_minor_ticks = _get_num_minor_ticks(axiscls, minor_ticks) axis_label = axis_label if axis_label: axis.axis_label = axis_label grid = Grid(plot=plot, dimension=dim, ticker=axis.ticker); grid if axis_location is not None: getattr(plot, axis_location).append(axis) def _process_tools_arg(plot, tools, tooltips=None): """ Adds tools to the plot object Args: plot (Plot): instance of a plot object tools (seq[Tool or str]|str): list of tool types or string listing the tool names. Those are converted using the _tool_from_string function. I.e.: `wheel_zoom,box_zoom,reset`. tooltips (string or seq[tuple[str, str]], optional): tooltips to use to configure a HoverTool Returns: list of Tools objects added to plot, map of supplied string names to tools """ tool_objs = [] tool_map = {} temp_tool_str = "" repeated_tools = [] if isinstance(tools, (list, tuple)): for tool in tools: if isinstance(tool, Tool): tool_objs.append(tool) elif isinstance(tool, string_types): temp_tool_str += tool + ',' else: raise ValueError("tool should be a string or an instance of Tool class") tools = temp_tool_str for tool in re.split(r"\s*,\s*", tools.strip()): # re.split will return empty strings; ignore them. 
if tool == "": continue tool_obj = _tool_from_string(tool) tool_objs.append(tool_obj) tool_map[tool] = tool_obj for typename, group in itertools.groupby( sorted(tool.__class__.__name__ for tool in tool_objs)): if len(list(group)) > 1: repeated_tools.append(typename) if repeated_tools: warnings.warn("%s are being repeated" % ",".join(repeated_tools)) if tooltips is not None: for tool_obj in tool_objs: if isinstance(tool_obj, HoverTool): tool_obj.tooltips = tooltips break else: tool_objs.append(HoverTool(tooltips=tooltips)) return tool_objs, tool_map def _process_active_tools(toolbar, tool_map, active_drag, active_inspect, active_scroll, active_tap): """ Adds tools to the plot object Args: toolbar (Toolbar): instance of a Toolbar object tools_map (dict[str]|Tool): tool_map from _process_tools_arg active_drag (str or Tool): the tool to set active for drag active_inspect (str or Tool): the tool to set active for inspect active_scroll (str or Tool): the tool to set active for scroll active_tap (str or Tool): the tool to set active for tap Returns: None Note: This function sets properties on Toolbar """ if active_drag in ['auto', None] or isinstance(active_drag, Tool): toolbar.active_drag = active_drag elif active_drag in tool_map: toolbar.active_drag = tool_map[active_drag] else: raise ValueError("Got unknown %r for 'active_drag', which was not a string supplied in 'tools' argument" % active_drag) if active_inspect in ['auto', None] or isinstance(active_inspect, Tool) or all(isinstance(t, Tool) for t in active_inspect): toolbar.active_inspect = active_inspect elif active_inspect in tool_map: toolbar.active_inspect = tool_map[active_inspect] else: raise ValueError("Got unknown %r for 'active_inspect', which was not a string supplied in 'tools' argument" % active_scroll) if active_scroll in ['auto', None] or isinstance(active_scroll, Tool): toolbar.active_scroll = active_scroll elif active_scroll in tool_map: toolbar.active_scroll = tool_map[active_scroll] else: raise ValueError("Got unknown %r for 'active_scroll', which was not a string supplied in 'tools' argument" % active_scroll) if active_tap in ['auto', None] or isinstance(active_tap, Tool): toolbar.active_tap = active_tap elif active_tap in tool_map: toolbar.active_tap = tool_map[active_tap] else: raise ValueError("Got unknown %r for 'active_tap', which was not a string supplied in 'tools' argument" % active_tap) def _get_argspecs(glyphclass): argspecs = OrderedDict() for arg in glyphclass._args: spec = {} descriptor = getattr(glyphclass, arg) # running python with -OO will discard docstrings -> __doc__ is None if descriptor.__doc__: spec['desc'] = "\n ".join(textwrap.dedent(descriptor.__doc__).split("\n")) else: spec['desc'] = "" spec['default'] = descriptor.class_default(glyphclass) spec['type'] = descriptor.property._sphinx_type() argspecs[arg] = spec return argspecs # This template generates the following: # # def foo(self, x, y=10, kwargs): # kwargs['x'] = x # kwargs['y'] = y # return func(self, **kwargs) _sigfunc_template = """ def %s(self, %s, **kwargs): %s return func(self, **kwargs) """ def _get_sigfunc(func_name, func, argspecs): # This code is to wrap the generic func(*args, **kw) glyph method so that # a much better signature is available to users. 
E.g., for ``square`` we have: # # Signature: p.square(x, y, size=4, angle=0.0, **kwargs) # # which provides descriptive names for positional args, as well as any defaults func_args_with_defaults = [] for arg, spec in argspecs.items(): if spec['default'] is None: func_args_with_defaults.append(arg) else: func_args_with_defaults.append("%s=%r" % (arg, spec['default'])) args_text = ", ".join(func_args_with_defaults) kwargs_assign_text = "\n".join(" kwargs[%r] = %s" % (x, x) for x in argspecs) func_text = _sigfunc_template % (func_name, args_text, kwargs_assign_text) func_code = compile(func_text, "fakesource", "exec") func_globals = {} eval(func_code, {"func": func}, func_globals) return func_globals[func_name] _arg_template = """ %s (%s) : %s (default: %r) """ _doc_template = """ Configure and add :class:`~bokeh.models.%s.%s` glyphs to this Figure. Args: %s Keyword Args: %s Other Parameters: alpha (float) : an alias to set all alpha keyword args at once color (Color) : an alias to set all color keyword args at once source (ColumnDataSource) : a user supplied data source legend (str) : a legend tag for this glyph x_range_name (str) : name an extra range to use for mapping x-coordinates y_range_name (str) : name an extra range to use for mapping y-coordinates level (Enum) : control the render level order for this glyph It is also possible to set the color and alpha parameters of a "nonselection" glyph. To do so, prefix any visual parameter with ``'nonselection_'``. For example, pass ``nonselection_alpha`` or ``nonselection_fill_alpha``. Returns: GlyphRenderer """ def _add_sigfunc_info(func, argspecs, glyphclass, extra_docs): func.__name__ = glyphclass.__name__ omissions = {'js_event_callbacks', 'js_property_callbacks', 'subscribed_events'} kwlines = [] kws = glyphclass.properties() - set(argspecs) for kw in kws: # these are not really useful, and should also really be private, just skip them if kw in omissions: continue descriptor = getattr(glyphclass, kw) typ = descriptor.property._sphinx_type() if descriptor.__doc__: desc = "\n ".join(textwrap.dedent(descriptor.__doc__).split("\n")) else: desc = "" kwlines.append(_arg_template % (kw, typ, desc, descriptor.class_default(glyphclass))) extra_kws = getattr(glyphclass, '_extra_kws', {}) for kw, (typ, desc) in extra_kws.items(): kwlines.append(" %s (%s) : %s" % (kw, typ, desc)) kwlines.sort() arglines = [] for arg, spec in argspecs.items(): arglines.append(_arg_template % (arg, spec['type'], spec['desc'], spec['default'])) mod = "markers" if issubclass(glyphclass, Marker) else "glyphs" func.__doc__ = _doc_template % (mod, func.__name__, "\n".join(arglines), "\n".join(kwlines)) if extra_docs: func.__doc__ += extra_docs def _glyph_function(glyphclass, extra_docs=None): def func(self, **kwargs): # Convert data source, if necesary is_user_source = kwargs.get('source', None) is not None if is_user_source: source = kwargs['source'] if not isinstance(source, ColumnarDataSource): try: # try converting the soruce to ColumnDataSource source = ColumnDataSource(source) except ValueError as err: msg = "Failed to auto-convert {curr_type} to ColumnDataSource.\n Original error: {err}".format( curr_type=str(type(source)), err=err.message ) reraise(ValueError, ValueError(msg), sys.exc_info()[2]) # update reddered_kws so that others can use the new source kwargs['source'] = source # Process legend kwargs and remove legend before we get going legend_item_label = _get_legend_item_label(kwargs) # Need to check if user source is present before _pop_renderer_args 
renderer_kws = _pop_renderer_args(kwargs) source = renderer_kws['data_source'] # Assign global_alpha from alpha if glyph type is an image if 'alpha' in kwargs and glyphclass.__name__ in ('Image', 'ImageRGBA', 'ImageURL'): kwargs['global_alpha'] = kwargs['alpha'] # handle the main glyph, need to process literals glyph_ca = _pop_colors_and_alpha(glyphclass, kwargs) incompatible_literal_spec_values = [] incompatible_literal_spec_values += _process_sequence_literals(glyphclass, kwargs, source, is_user_source) incompatible_literal_spec_values += _process_sequence_literals(glyphclass, glyph_ca, source, is_user_source) if incompatible_literal_spec_values: raise RuntimeError(_GLYPH_SOURCE_MSG % nice_join(incompatible_literal_spec_values, conjuction="and")) # handle the nonselection glyph, we always set one nsglyph_ca = _pop_colors_and_alpha(glyphclass, kwargs, prefix='nonselection_', default_alpha=0.1) # handle the selection glyph, if any properties were given if any(x.startswith('selection_') for x in kwargs): sglyph_ca = _pop_colors_and_alpha(glyphclass, kwargs, prefix='selection_') else: sglyph_ca = None # handle the hover glyph, if any properties were given if any(x.startswith('hover_') for x in kwargs): hglyph_ca = _pop_colors_and_alpha(glyphclass, kwargs, prefix='hover_') else: hglyph_ca = None # handle the mute glyph, if any properties were given if any(x.startswith('muted_') for x in kwargs): mglyph_ca = _pop_colors_and_alpha(glyphclass, kwargs, prefix='muted_') else: mglyph_ca = None glyph = _make_glyph(glyphclass, kwargs, glyph_ca) nsglyph = _make_glyph(glyphclass, kwargs, nsglyph_ca) sglyph = _make_glyph(glyphclass, kwargs, sglyph_ca) hglyph = _make_glyph(glyphclass, kwargs, hglyph_ca) mglyph = _make_glyph(glyphclass, kwargs, mglyph_ca) glyph_renderer = GlyphRenderer(glyph=glyph, nonselection_glyph=nsglyph, selection_glyph=sglyph, hover_glyph=hglyph, muted_glyph=mglyph, **renderer_kws) if legend_item_label: _update_legend(self, legend_item_label, glyph_renderer) self.renderers.append(glyph_renderer) return glyph_renderer argspecs = _get_argspecs(glyphclass) sigfunc = _get_sigfunc(glyphclass.__name__.lower(), func, argspecs) sigfunc.glyph_method = True _add_sigfunc_info(sigfunc, argspecs, glyphclass, extra_docs) return sigfunc
bsd-3-clause
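A minimal sketch (an assumption, not part of the record above) exercising a few of the private helpers that bokeh/plotting/helpers.py defines; it assumes the module is importable at the record's path as bokeh.plotting.helpers in this bokeh version, and only uses functions shown in the file.

from bokeh.plotting.helpers import _get_range, _stack, _tool_from_string

# _get_range dispatches on the input type.
r = _get_range((0, 10))           # a 2-tuple becomes a Range1d(start=0, end=10)
f = _get_range(["a", "b", "c"])   # a sequence of strings becomes a FactorRange
print(type(r).__name__, type(f).__name__)

# _tool_from_string maps a tool name (as used in the figure() `tools` string)
# to a configured Tool instance.
print(type(_tool_from_string("xpan")).__name__)   # PanTool, with dimensions='width'

# _stack builds per-glyph keyword dicts for cumulative stacking, broadcasting
# any list-valued keyword arguments across the stackers.
for d in _stack(["y1", "y2"], "bottom", "top", fill_color=["red", "blue"]):
    print(d["name"], d["fill_color"])   # the 'bottom'/'top' entries hold stack() expressions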
liberatorqjw/scikit-learn
examples/manifold/plot_swissroll.py
330
1446
""" =================================== Swiss Roll reduction with LLE =================================== An illustration of Swiss Roll reduction with locally linear embedding """ # Author: Fabian Pedregosa -- <[email protected]> # License: BSD 3 clause (C) INRIA 2011 print(__doc__) import matplotlib.pyplot as plt # This import is needed to modify the way figure behaves from mpl_toolkits.mplot3d import Axes3D Axes3D #---------------------------------------------------------------------- # Locally linear embedding of the swiss roll from sklearn import manifold, datasets X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500) print("Computing LLE embedding") X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12, n_components=2) print("Done. Reconstruction error: %g" % err) #---------------------------------------------------------------------- # Plot result fig = plt.figure() try: # compatibility matplotlib < 1.0 ax = fig.add_subplot(211, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral) except: ax = fig.add_subplot(211) ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral) ax.set_title("Original data") ax = fig.add_subplot(212) ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral) plt.axis('tight') plt.xticks([]), plt.yticks([]) plt.title('Projected data') plt.show()
bsd-3-clause
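As a companion to the example above, a short sketch (an assumption, not part of the record) performing the same reduction through the estimator class instead of the functional API; the fitted model's reconstruction_error_ corresponds to the err value printed above.

from sklearn import datasets, manifold

X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
lle = manifold.LocallyLinearEmbedding(n_neighbors=12, n_components=2)
X_r = lle.fit_transform(X)          # same 2-D embedding as locally_linear_embedding
print("Reconstruction error: %g" % lle.reconstruction_error_)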
saiwing-yeung/scikit-learn
sklearn/utils/__init__.py
17
12898
""" The :mod:`sklearn.utils` module includes various utilities. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, assert_all_finite, check_random_state, column_or_1d, check_array, check_consistent_length, check_X_y, indexable, check_symmetric) from .deprecation import deprecated from .class_weight import compute_class_weight, compute_sample_weight from ..externals.joblib import cpu_count from ..exceptions import ConvergenceWarning as _ConvergenceWarning from ..exceptions import DataConversionWarning @deprecated("ConvergenceWarning has been moved into the sklearn.exceptions " "module. It will not be available here from version 0.19") class ConvergenceWarning(_ConvergenceWarning): pass __all__ = ["murmurhash3_32", "as_float_array", "assert_all_finite", "check_array", "check_random_state", "compute_class_weight", "compute_sample_weight", "column_or_1d", "safe_indexing", "check_consistent_length", "check_X_y", 'indexable', "check_symmetric"] def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask: array Mask to be used on X. Returns ------- mask """ mask = np.asarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def axis0_safe_slice(X, mask, len_mask): """ This mask is safer than safe_mask since it returns an empty array, when a sparse matrix is sliced with a boolean mask with all False, instead of raising an unhelpful error in older versions of SciPy. See: https://github.com/scipy/scipy/issues/5361 Also note that we can avoid doing the dot product by checking if the len_mask is not zero in _huber_loss_and_gradient but this is not going to be the bottleneck, since the number of outliers and non_outliers are typically non-zero and it makes the code tougher to follow. """ if len_mask != 0: return X[safe_mask(X, mask), :] return np.zeros(shape=(0, X.shape[1])) def safe_indexing(X, indices): """Return items or rows from X using indices. Allows simple indexing of lists or arrays. Parameters ---------- X : array-like, sparse-matrix, list. Data from which to sample rows or items. indices : array-like, list Indices according to which X will be subsampled. """ if hasattr(X, "iloc"): # Pandas Dataframes and Series try: return X.iloc[indices] except ValueError: # Cython typed memoryviews internally used in pandas do not support # readonly buffers. warnings.warn("Copying input dataframe for slicing.", DataConversionWarning) return X.copy().iloc[indices] elif hasattr(X, "shape"): if hasattr(X, 'take') and (hasattr(indices, 'dtype') and indices.dtype.kind == 'i'): # This is often substantially faster than X[indices] return X.take(indices, axis=0) else: return X[indices] else: return [X[idx] for idx in indices] def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. 
If left to None this is automatically set to the first dimension of the arrays. If replace is False it should not be larger than the length of arrays. random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- resampled_arrays : sequence of indexable data-structures Sequence of resampled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples elif (max_n_samples > n_samples) and (not replace): raise ValueError("Cannot sample %d out of arrays with dim %d" "when replace is False" % (max_n_samples, n_samples)) check_consistent_length(*arrays) if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] # convert sparse matrices to CSR for row-based indexing arrays = [a.tocsr() if issparse(a) else a for a in arrays] resampled_arrays = [safe_indexing(a, indices) for a in arrays] if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- *arrays : sequence of indexable data-structures Indexable data-structures can be arrays, lists, dataframes or scipy sparse matrices with consistent first dimension. random_state : int or RandomState instance Control the shuffling for reproducible behavior. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- shuffled_arrays : sequence of indexable data-structures Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]]) >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 
'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. Parameters ---------- X : array like, matrix, sparse matrix copy : boolean, optional, default True Whether to create a copy of X and operate on it or to perform inplace computation (default behaviour). Returns ------- X ** 2 : element wise square """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_batches(n, batch_size): """Generator to create slices containing batch_size elements, from 0 to n. The last slice may contain less than batch_size elements, when batch_size does not divide n. Examples -------- >>> from sklearn.utils import gen_batches >>> list(gen_batches(7, 3)) [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)] >>> list(gen_batches(6, 3)) [slice(0, 3, None), slice(3, 6, None)] >>> list(gen_batches(2, 3)) [slice(0, 2, None)] """ start = 0 for _ in range(int(n // batch_size)): end = start + batch_size yield slice(start, end) start = end if start < n: yield slice(start, n) def gen_even_slices(n, n_packs, n_samples=None): """Generator to create n_packs slices going up to n. Pass n_samples when the slices are to be used for sparse matrix indexing; slicing off-the-end raises an exception, while it works for NumPy arrays. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 if n_packs < 1: raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start = end def _get_n_jobs(n_jobs): """Get number of jobs for the computation. This function reimplements the logic of joblib to determine the actual number of jobs depending on the cpu count. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. Parameters ---------- n_jobs : int Number of jobs stated in joblib convention. Returns ------- n_jobs : int The actual number of jobs as positive integer. Examples -------- >>> from sklearn.utils import _get_n_jobs >>> _get_n_jobs(4) 4 >>> jobs = _get_n_jobs(-2) >>> assert jobs == max(cpu_count() - 1, 1) >>> _get_n_jobs(0) Traceback (most recent call last): ... ValueError: Parameter n_jobs == 0 has no meaning. 
""" if n_jobs < 0: return max(cpu_count() + 1 + n_jobs, 1) elif n_jobs == 0: raise ValueError('Parameter n_jobs == 0 has no meaning.') else: return n_jobs def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x)
bsd-3-clause
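A minimal usage sketch (not part of the file) for two indexing helpers defined above, safe_mask and safe_indexing, which have no doctest of their own; it assumes only numpy and scipy alongside this sklearn version.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils import safe_mask, safe_indexing

X_dense = np.arange(12).reshape(4, 3)
X_sparse = csr_matrix(X_dense)
mask = np.array([True, False, True, False])

# safe_mask converts a boolean mask to integer indices when X is sparse,
# so the same mask can be applied to both representations.
print(X_dense[safe_mask(X_dense, mask)])
print(X_sparse[safe_mask(X_sparse, mask)].toarray())

# safe_indexing subsamples rows/items from arrays, sparse matrices or plain lists.
print(safe_indexing([10, 20, 30, 40], [0, 2]))   # [10, 30]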
emanjavacas/text-nn
scripts/hypergender.py
1
8150
import os import math from pprint import pprint from collections import Counter import argparse from sklearn import metrics import torch import torch.nn as nn from seqmod.misc.preprocess import text_processor from seqmod.misc.optimizer import Optimizer from seqmod.misc.trainer import Trainer from seqmod.misc.loggers import StdLogger from seqmod.misc.dataset import PairedDataset from seqmod.misc.early_stopping import EarlyStopping from seqmod.hyper import make_sampler, Hyperband import seqmod.utils as u from text_nn.loaders import load_twisty, load_dataset, load_embeddings from text_nn import models def compute_scores(model, dataset): trues, preds = [], [] for sources, targets in dataset: _, b_preds = model.predict(sources) trues.extend(targets.view(-1).data.tolist()) preds.extend(b_preds.view(-1).data.tolist()) return trues, preds def make_score_hook(model, dataset): def hook(trainer, epoch, batch, checkpoint): trues, preds = compute_scores(model, dataset) trainer.log("info", metrics.classification_report(trues, preds)) return hook if __name__ == '__main__': parser = argparse.ArgumentParser() # model parser.add_argument('--emb_dim', default=50, type=int) parser.add_argument('--hid_dim', default=50, type=int) parser.add_argument('--dropout', default=0.0, type=float) parser.add_argument('--load_embeddings', action='store_true') parser.add_argument('--flavor', default=None) parser.add_argument('--suffix', default=None) parser.add_argument('--max_dim', default=100, type=int) parser.add_argument('--out_channels', default=(12,), nargs='+', type=int) parser.add_argument('--kernel_sizes', nargs='+', type=int, default=(5, 4, 3)) parser.add_argument('--act', default='relu') parser.add_argument('--ktop', default=4, type=int) # training parser.add_argument('--optim', default='Adam') parser.add_argument('--learning_rate', default=0.001, type=float) parser.add_argument('--max_norm', default=20., type=float) parser.add_argument('--batch_size', type=int, default=264) parser.add_argument('--weight_decay', type=float, default=0.0) parser.add_argument('--gpu', action='store_true') parser.add_argument('--checkpoints', default=100, type=int) parser.add_argument('--hooks_per_epoch', default=10, type=int) parser.add_argument('--max_iter', default=81, type=int) parser.add_argument('--eta', default=3, type=int) # dataset parser.add_argument('--dev', default=0.1, type=float) parser.add_argument('--test', default=0.2, type=float) parser.add_argument('--min_len', default=5, type=int) parser.add_argument('--min_freq', default=5, type=int) parser.add_argument('--level', default='token') parser.add_argument('--concat', action='store_true') parser.add_argument('--cache_data', action='store_true') parser.add_argument('--max_tweets', type=int, default=0) args = parser.parse_args() print("Loading data...") prefix = '{level}.{min_len}.{min_freq}.{concat}.{max_tweets}'.format(**vars(args)) if not args.cache_data or not os.path.isfile('data/{}_train.pt'.format(prefix)): src, trg = load_twisty( min_len=args.min_len, level=args.level, concat=args.concat, processor=text_processor(lower=False), max_tweets=None if args.max_tweets == 0 else args.max_tweets) train, test, valid = load_dataset( src, trg, args.batch_size, min_freq=args.min_freq, gpu=args.gpu, dev=args.dev, test=args.test) if args.cache_data: train.to_disk('data/{}_train.pt'.format(prefix)) test.to_disk('data/{}_test.pt'.format(prefix)) valid.to_disk('data/{}_valid.pt'.format(prefix)) else: train = PairedDataset.from_disk('data/{}_train.pt'.format(prefix)) valid = 
PairedDataset.from_disk('data/{}_valid.pt'.format(prefix)) train.set_gpu(args.gpu), valid.set_gpu(args.gpu) datasets = {'train': train, 'valid': valid} weight = None if args.load_embeddings: print("Loading pretrained embeddings") weight = load_embeddings( train.d['src'].vocab, args.flavor, args.suffix, 'data') print("Starting experiment") sampler = make_sampler({ 'emb_dim': ['uniform', int, 20, 100], 'hid_dim': ['uniform', int, 20, 100], 'dropout': ['loguniform', float, math.log(0.1), math.log(0.5)], 'model': ['choice', str, ('CNNText', 'DCNN', 'RCNN', 'RNNText', 'ConvRec')], # 'load_embeddings': ['choice', bool, (True, False)], 'max_dim': ['uniform', int, 50, 200], # not applying to DCNN 'out_channels': ['uniform', int, 10, 150], 'kernel_sizes': ['choice', tuple, [ (5, 4, 3), (7, 5, 4, 3), (9, 7, 5, 4, 3), (7, 5, 3), (9, 5, 3, 2), (12, 9, 6, 3)]], # only applying to DCNN: increase kernel_sizes, out_channels by factor 'dcnn_factor': ['uniform', int, 1, 3], 'ktop': ['uniform', int, 3, 8], # 'lr': ['loguniform', float, math.log(0.001), math.log(0.05)] }) vocab, n_classes = len(train.d['src'].vocab), len(train.d['trg'].vocab) class create_runner(object): def __init__(self, params): self.trainer, self.early_stopping = None, None if params['model'] == 'DCNN': kernel_sizes, out_channels = (7, 5), (6, 14), out_channels = tuple( [c * params['dcnn_factor'] for c in out_channels]) kernel_sizes = tuple( [k * params['dcnn_factor'] for k in kernel_sizes]) else: kernel_sizes = params['kernel_sizes'] out_channels = params['out_channels'] if params.get('load_embeddings', None): if not args.load_embeddings: raise ValueError("Need load_embeddings") emb_dim = args.emb_dim else: emb_dim = params['emb_dim'] model = getattr(models, params['model'])( n_classes, vocab, emb_dim=emb_dim, hid_dim=params['hid_dim'], dropout=params['dropout'], padding_idx=train.d['src'].get_pad(), # cnn act=args.act, out_channels=out_channels, kernel_sizes=kernel_sizes, # - rcnn only max_dim=params['max_dim'], # - DCNN only ktop=params['ktop']) u.initialize_model(model) if params.get('load_embeddings', None): model.init_embeddings(weight) optimizer = Optimizer( model.parameters(), args.optim, lr=args.learning_rate, max_norm=args.max_norm, weight_decay=args.weight_decay) self.early_stopping = EarlyStopping( 5, patience=3, reset_patience=False) def early_stop_hook(trainer, epoch, batch_num, num_checkpoints): valid_loss = trainer.validate_model() self.early_stopping.add_checkpoint(sum(valid_loss.pack())) trainer = Trainer(model, datasets, optimizer) # trainer.add_loggers(StdLogger()) # trainer.add_hook(make_score_hook(model, valid), # hooks_per_epoch=args.hooks_per_epoch) trainer.add_hook(early_stop_hook, hooks_per_epoch=5) self.trainer = trainer def __call__(self, n_iters): batches = int(len(train) / args.max_iter) * 5 print("Training {}".format(batches * n_iters)) if args.gpu: self.trainer.model.cuda() (_, loss), _ = self.trainer.train_batches( batches * n_iters, args.checkpoints, shuffle=True) self.trainer.model.cpu() return {'loss': loss, 'early_stop': self.early_stopping.stopped} hb = Hyperband( sampler, create_runner, max_iter=args.max_iter, eta=args.eta) result = hb.run() pprint(result)
mit
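The script above delegates the search schedule to seqmod.hyper.Hyperband. As an illustration of what that schedule does, here is a self-contained, generic re-implementation of Hyperband's successive-halving loop (an assumption for clarity, not seqmod's code; get_params and run_config are toy stand-ins for the sampler and the create_runner callable).

import math
import random

def get_params():
    # toy random configuration, analogous to make_sampler's output
    return {'lr': 10 ** random.uniform(-4, -1), 'hid_dim': random.randint(20, 100)}

def run_config(n_iters, params):
    # toy objective: pretend more iterations and a moderate lr give lower loss
    return abs(math.log10(params['lr']) + 2.5) / n_iters

def hyperband(max_iter=81, eta=3):
    s_max = int(math.log(max_iter) / math.log(eta))
    B = (s_max + 1) * max_iter
    best = (float('inf'), None)
    for s in reversed(range(s_max + 1)):
        n = int(math.ceil(B / max_iter / (s + 1) * eta ** s))  # initial configs
        r = max_iter * eta ** (-s)                             # initial budget
        configs = [get_params() for _ in range(n)]
        for i in range(s + 1):                                 # successive halving
            n_i = int(n * eta ** (-i))
            r_i = int(r * eta ** i)
            losses = [run_config(r_i, c) for c in configs]
            ranked = sorted(zip(losses, configs), key=lambda t: t[0])
            best = min(best, ranked[0], key=lambda t: t[0])
            configs = [c for _, c in ranked[:max(1, n_i // eta)]]
    return best

print(hyperband())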
boomsbloom/dtm-fmri
DTM/for_gensim/lib/python2.7/site-packages/pandas/sparse/series.py
7
28462
""" Data structures for sparse float data. Life is made simpler by dealing only with float64 data """ # pylint: disable=E1101,E1103,W0231 import numpy as np import warnings from pandas.types.missing import isnull, notnull from pandas.types.common import is_scalar from pandas.core.common import _values_from_object, _maybe_match_name from pandas.compat.numpy import function as nv from pandas.core.index import Index, _ensure_index, InvalidIndexError from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.internals import SingleBlockManager from pandas.core import generic import pandas.core.common as com import pandas.core.ops as ops import pandas.index as _index from pandas.util.decorators import Appender from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray, _make_index) from pandas._sparse import BlockIndex, IntIndex import pandas._sparse as splib from pandas.sparse.scipy_sparse import (_sparse_series_to_coo, _coo_to_sparse_series) _shared_doc_kwargs = dict(klass='SparseSeries', axes_single_arg="{0, 'index'}") # ----------------------------------------------------------------------------- # Wrapper function for Series arithmetic methods def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, **eval_kwargs): """ Wrapper function for Series arithmetic operations, to avoid code duplication. str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are present for compatibility. """ def wrapper(self, other): if isinstance(other, Series): if not isinstance(other, SparseSeries): other = other.to_sparse(fill_value=self.fill_value) return _sparse_series_op(self, other, op, name) elif isinstance(other, DataFrame): return NotImplemented elif is_scalar(other): with np.errstate(all='ignore'): new_values = op(self.values, other) return self._constructor(new_values, index=self.index, name=self.name) else: # pragma: no cover raise TypeError('operation with %s not supported' % type(other)) wrapper.__name__ = name if name.startswith("__"): # strip special method names, e.g. `__add__` needs to be `add` when # passed to _sparse_series_op name = name[2:-2] return wrapper def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index new_name = _maybe_match_name(left, right) result = _sparse_array_op(left.values, right.values, op, name, series=True) return left._constructor(result, index=new_index, name=new_name) class SparseSeries(Series): """Data structure for labeled, sparse floating point data Parameters ---------- data : {array-like, Series, SparseSeries, dict} kind : {'block', 'integer'} fill_value : float Code for missing value. Defaults depends on dtype. 0 for int dtype, False for bool dtype, and NaN for other dtypes sparse_index : {BlockIndex, IntIndex}, optional Only if you have one. Mainly used internally Notes ----- SparseSeries objects are immutable via the typical Python means. 
If you must change values, convert to dense, make your changes, then convert back to sparse """ _subtyp = 'sparse_series' def __init__(self, data=None, index=None, sparse_index=None, kind='block', fill_value=None, name=None, dtype=None, copy=False, fastpath=False): # we are called internally, so short-circuit if fastpath: # data is an ndarray, index is defined if not isinstance(data, SingleBlockManager): data = SingleBlockManager(data, index, fastpath=True) if copy: data = data.copy() else: if data is None: data = [] if isinstance(data, Series) and name is None: name = data.name if isinstance(data, SparseArray): if index is not None: assert (len(index) == len(data)) sparse_index = data.sp_index if fill_value is None: fill_value = data.fill_value data = np.asarray(data) elif isinstance(data, SparseSeries): if index is None: index = data.index.view() if fill_value is None: fill_value = data.fill_value # extract the SingleBlockManager data = data._data elif isinstance(data, (Series, dict)): if index is None: index = data.index.view() data = Series(data) res = make_sparse(data, kind=kind, fill_value=fill_value) data, sparse_index, fill_value = res elif isinstance(data, (tuple, list, np.ndarray)): # array-like if sparse_index is None: res = make_sparse(data, kind=kind, fill_value=fill_value) data, sparse_index, fill_value = res else: assert (len(data) == sparse_index.npoints) elif isinstance(data, SingleBlockManager): if dtype is not None: data = data.astype(dtype) if index is None: index = data.index.view() else: data = data.reindex(index, copy=False) else: length = len(index) if data == fill_value or (isnull(data) and isnull(fill_value)): if kind == 'block': sparse_index = BlockIndex(length, [], []) else: sparse_index = IntIndex(length, []) data = np.array([]) else: if kind == 'block': locs, lens = ([0], [length]) if length else ([], []) sparse_index = BlockIndex(length, locs, lens) else: sparse_index = IntIndex(length, index) v = data data = np.empty(length) data.fill(v) if index is None: index = com._default_index(sparse_index.length) index = _ensure_index(index) # create/copy the manager if isinstance(data, SingleBlockManager): if copy: data = data.copy() else: # create a sparse array if not isinstance(data, SparseArray): data = SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype, copy=copy) data = SingleBlockManager(data, index) generic.NDFrame.__init__(self, data) self.index = index self.name = name @property def values(self): """ return the array """ return self.block.values def __array__(self, result=None): """ the array interface, return my values """ return self.block.values def get_values(self): """ same as values """ return self.block.to_dense().view() @property def block(self): return self._data._block @property def fill_value(self): return self.block.fill_value @fill_value.setter def fill_value(self, v): self.block.fill_value = v @property def sp_index(self): return self.block.sp_index @property def sp_values(self): return self.values.sp_values @property def npoints(self): return self.sp_index.npoints @classmethod def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): """ Simplified alternate constructor """ return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath) @property def _constructor(self): return SparseSeries @property def _constructor_expanddim(self): from pandas.sparse.api import SparseDataFrame return SparseDataFrame @property def kind(self): if 
isinstance(self.sp_index, BlockIndex): return 'block' elif isinstance(self.sp_index, IntIndex): return 'integer' def as_sparse_array(self, kind=None, fill_value=None, copy=False): """ return my self as a sparse array, do not copy by default """ if fill_value is None: fill_value = self.fill_value if kind is None: kind = self.kind return SparseArray(self.values, sparse_index=self.sp_index, fill_value=fill_value, kind=kind, copy=copy) def __len__(self): return len(self.block) @property def shape(self): return self._data.shape def __unicode__(self): # currently, unicode is same as repr...fixes infinite loop series_rep = Series.__unicode__(self) rep = '%s\n%s' % (series_rep, repr(self.sp_index)) return rep def __array_wrap__(self, result, context=None): """ Gets called prior to a ufunc (and after) See SparseArray.__array_wrap__ for detail. """ if isinstance(context, tuple) and len(context) == 3: ufunc, args, domain = context args = [getattr(a, 'fill_value', a) for a in args] with np.errstate(all='ignore'): fill_value = ufunc(self.fill_value, *args[1:]) else: fill_value = self.fill_value return self._constructor(result, index=self.index, sparse_index=self.sp_index, fill_value=fill_value, copy=False).__finalize__(self) def __array_finalize__(self, obj): """ Gets called after any ufunc or other array operations, necessary to pass on the index. """ self.name = getattr(obj, 'name', None) self.fill_value = getattr(obj, 'fill_value', None) def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation """ return op(self.get_values(), skipna=skipna, **kwds) def __getstate__(self): # pickling return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data, fill_value=self.fill_value, name=self.name) def _unpickle_series_compat(self, state): nd_state, own_state = state # recreate the ndarray data = np.empty(nd_state[1], dtype=nd_state[2]) np.ndarray.__setstate__(data, nd_state) index, fill_value, sp_index = own_state[:3] name = None if len(own_state) > 3: name = own_state[3] # create a sparse array if not isinstance(data, SparseArray): data = SparseArray(data, sparse_index=sp_index, fill_value=fill_value, copy=False) # recreate data = SingleBlockManager(data, index, fastpath=True) generic.NDFrame.__init__(self, data) self._set_axis(0, index) self.name = name def __iter__(self): """ forward to the array """ return iter(self.values) def _set_subtyp(self, is_all_dates): if is_all_dates: object.__setattr__(self, '_subtyp', 'sparse_time_series') else: object.__setattr__(self, '_subtyp', 'sparse_series') def _ixs(self, i, axis=0): """ Return the i-th value or values in the SparseSeries by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence) """ label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis, convert=True) else: return self._get_val_at(i) def _get_val_at(self, loc): """ forward to the array """ return self.block.values._get_val_at(loc) def __getitem__(self, key): try: return self.index.get_value(self, key) except InvalidIndexError: pass except KeyError: if isinstance(key, (int, np.integer)): return self._get_val_at(key) elif key is Ellipsis: return self raise Exception('Requested index not in this series!') except TypeError: # Could not hash item, must be array-like? 
pass key = _values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) return self._constructor(self.values[key], index=self.index[key]).__finalize__(self) def _get_values(self, indexer): try: return self._constructor(self._data.get_slice(indexer), fastpath=True).__finalize__(self) except Exception: return self[indexer] def _set_with_engine(self, key, value): return self.set_value(key, value) def abs(self): """ Return an object with absolute value taken. Only applicable to objects that are all numeric Returns ------- abs: type of caller """ return self._constructor(np.abs(self.values), index=self.index).__finalize__(self) def get(self, label, default=None): """ Returns value occupying requested label, default to specified missing value if not present. Analogous to dict.get Parameters ---------- label : object Label value looking for default : object, optional Value to return if label not in index Returns ------- y : scalar """ if label in self.index: loc = self.index.get_loc(label) return self._get_val_at(loc) else: return default def get_value(self, label, takeable=False): """ Retrieve single value at passed index label Parameters ---------- index : label takeable : interpret the index as indexers, default False Returns ------- value : scalar value """ loc = label if takeable is True else self.index.get_loc(label) return self._get_val_at(loc) def set_value(self, label, value, takeable=False): """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Notes ----- This method *always* returns a new object. It is not particularly efficient but is provided for API compatibility with Series Returns ------- series : SparseSeries """ values = self.to_dense() # if the label doesn't exist, we will create a new object here # and possibily change the index new_values = values.set_value(label, value, takeable=takeable) if new_values is not None: values = new_values new_index = values.index values = SparseArray(values, fill_value=self.fill_value, kind=self.kind) self._data = SingleBlockManager(values, new_index) self._index = new_index def _set_values(self, key, value): # this might be inefficient as we have to recreate the sparse array # rather than setting individual elements, but have to convert # the passed slice/boolean that's in dense space into a sparse indexer # not sure how to do that! if isinstance(key, Series): key = key.values values = self.values.to_dense() values[key] = _index.convert_scalar(values, value) values = SparseArray(values, fill_value=self.fill_value, kind=self.kind) self._data = SingleBlockManager(values, self.index) def to_dense(self, sparse_only=False): """ Convert SparseSeries to (dense) Series """ if sparse_only: int_index = self.sp_index.to_int_index() index = self.index.take(int_index.indices) return Series(self.sp_values, index=index, name=self.name) else: return Series(self.values.to_dense(), index=self.index, name=self.name) @property def density(self): r = float(self.sp_index.npoints) / float(self.sp_index.length) return r def copy(self, deep=True): """ Make a copy of the SparseSeries. 
Only the actual sparse values need to be copied """ new_data = self._data if deep: new_data = self._data.copy() return self._constructor(new_data, sparse_index=self.sp_index, fill_value=self.fill_value).__finalize__(self) def reindex(self, index=None, method=None, copy=True, limit=None, **kwargs): """ Conform SparseSeries to new Index See Series.reindex docstring for general behavior Returns ------- reindexed : SparseSeries """ new_index = _ensure_index(index) if self.index.equals(new_index): if copy: return self.copy() else: return self return self._constructor(self._data.reindex(new_index, method=method, limit=limit, copy=copy), index=new_index).__finalize__(self) def sparse_reindex(self, new_index): """ Conform sparse values to new SparseIndex Parameters ---------- new_index : {BlockIndex, IntIndex} Returns ------- reindexed : SparseSeries """ if not isinstance(new_index, splib.SparseIndex): raise TypeError('new index must be a SparseIndex') block = self.block.sparse_reindex(new_index) new_data = SingleBlockManager(block, self.index) return self._constructor(new_data, index=self.index, sparse_index=new_index, fill_value=self.fill_value).__finalize__(self) def take(self, indices, axis=0, convert=True, *args, **kwargs): """ Sparse-compatible version of ndarray.take Returns ------- taken : ndarray """ convert = nv.validate_take_with_convert(convert, args, kwargs) new_values = SparseArray.take(self.values, indices) new_index = self.index.take(indices) return self._constructor(new_values, index=new_index).__finalize__(self) def cumsum(self, axis=0, *args, **kwargs): """ Cumulative sum of values. Preserves locations of NaN values Returns ------- cumsum : SparseSeries if `self` has a null `fill_value` and a generic Series otherwise """ nv.validate_cumsum(args, kwargs) new_array = SparseArray.cumsum(self.values) if isinstance(new_array, SparseArray): return self._constructor( new_array, index=self.index, sparse_index=new_array.sp_index).__finalize__(self) # TODO: gh-12855 - return a SparseSeries here return Series(new_array, index=self.index).__finalize__(self) @Appender(generic._shared_docs['isnull']) def isnull(self): arr = SparseArray(isnull(self.values.sp_values), sparse_index=self.values.sp_index, fill_value=isnull(self.fill_value)) return self._constructor(arr, index=self.index).__finalize__(self) @Appender(generic._shared_docs['isnotnull']) def isnotnull(self): arr = SparseArray(notnull(self.values.sp_values), sparse_index=self.values.sp_index, fill_value=notnull(self.fill_value)) return self._constructor(arr, index=self.index).__finalize__(self) def dropna(self, axis=0, inplace=False, **kwargs): """ Analogous to Series.dropna. 
If fill_value=NaN, returns a dense Series """ # TODO: make more efficient axis = self._get_axis_number(axis or 0) dense_valid = self.to_dense().valid() if inplace: raise NotImplementedError("Cannot perform inplace dropna" " operations on a SparseSeries") if isnull(self.fill_value): return dense_valid else: dense_valid = dense_valid[dense_valid != self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value) @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs) def shift(self, periods, freq=None, axis=0): if periods == 0: return self.copy() # no special handling of fill values yet if not isnull(self.fill_value): shifted = self.to_dense().shift(periods, freq=freq, axis=axis) return shifted.to_sparse(fill_value=self.fill_value, kind=self.kind) if freq is not None: return self._constructor( self.sp_values, sparse_index=self.sp_index, index=self.index.shift(periods, freq), fill_value=self.fill_value).__finalize__(self) int_index = self.sp_index.to_int_index() new_indices = int_index.indices + periods start, end = new_indices.searchsorted([0, int_index.length]) new_indices = new_indices[start:end] new_sp_index = _make_index(len(self), new_indices, self.sp_index) arr = self.values._simple_new(self.sp_values[start:end].copy(), new_sp_index, fill_value=np.nan) return self._constructor(arr, index=self.index).__finalize__(self) def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes Parameters ---------- other : Series Returns ------- y : Series """ if isinstance(other, SparseSeries): other = other.to_dense() dense_combined = self.to_dense().combine_first(other) return dense_combined.to_sparse(fill_value=self.fill_value) def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex. Use row_levels and column_levels to determine the row and column coordinates respectively. row_levels and column_levels are the names (labels) or numbers of the levels. {row_levels, column_levels} must be a partition of the MultiIndex level names (or numbers). .. versionadded:: 0.16.0 Parameters ---------- row_levels : tuple/list column_levels : tuple/list sort_labels : bool, default False Sort the row and column labels before forming the sparse matrix. Returns ------- y : scipy.sparse.coo_matrix rows : list (row labels) columns : list (column labels) Examples -------- >>> from numpy import nan >>> s = Series([3.0, nan, 1.0, 3.0, nan, nan]) >>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0), (1, 2, 'a', 1), (1, 1, 'b', 0), (1, 1, 'b', 1), (2, 1, 'b', 0), (2, 1, 'b', 1)], names=['A', 'B', 'C', 'D']) >>> ss = s.to_sparse() >>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'], column_levels=['C', 'D'], sort_labels=True) >>> A <3x4 sparse matrix of type '<class 'numpy.float64'>' with 3 stored elements in COOrdinate format> >>> A.todense() matrix([[ 0., 0., 1., 3.], [ 3., 0., 0., 0.], [ 0., 0., 0., 0.]]) >>> rows [(1, 1), (1, 2), (2, 1)] >>> columns [('a', 0), ('a', 1), ('b', 0), ('b', 1)] """ A, rows, columns = _sparse_series_to_coo(self, row_levels, column_levels, sort_labels=sort_labels) return A, rows, columns @classmethod def from_coo(cls, A, dense_index=False): """ Create a SparseSeries from a scipy.sparse.coo_matrix. .. 
versionadded:: 0.16.0 Parameters ---------- A : scipy.sparse.coo_matrix dense_index : bool, default False If False (default), the SparseSeries index consists of only the coords of the non-null entries of the original coo_matrix. If True, the SparseSeries index consists of the full sorted (row, col) coordinates of the coo_matrix. Returns ------- s : SparseSeries Examples --------- >>> from scipy import sparse >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)) >>> A <3x4 sparse matrix of type '<class 'numpy.float64'>' with 3 stored elements in COOrdinate format> >>> A.todense() matrix([[ 0., 0., 1., 2.], [ 3., 0., 0., 0.], [ 0., 0., 0., 0.]]) >>> ss = SparseSeries.from_coo(A) >>> ss 0 2 1 3 2 1 0 3 dtype: float64 BlockIndex Block locations: array([0], dtype=int32) Block lengths: array([3], dtype=int32) """ return _coo_to_sparse_series(A, dense_index=dense_index) # overwrite series methods with unaccelerated versions ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False, **ops.series_special_funcs) ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False, **ops.series_flex_funcs) # overwrite basic arithmetic to use SparseSeries version # force methods to overwrite previous definitions. ops.add_special_arithmetic_methods(SparseSeries, _arith_method, comp_method=_arith_method, bool_method=None, use_numexpr=False, force=True) # backwards compatiblity class SparseTimeSeries(SparseSeries): def __init__(self, *args, **kwargs): # deprecation TimeSeries, #10890 warnings.warn("SparseTimeSeries is deprecated. Please use " "SparseSeries", FutureWarning, stacklevel=2) super(SparseTimeSeries, self).__init__(*args, **kwargs)
mit
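A minimal usage sketch for the SparseSeries.to_coo / from_coo round-trip documented in the entry above, adapted from its own docstring examples; it assumes an older pandas (pre-1.0) where SparseSeries and Series.to_sparse still exist.

import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
s.index = pd.MultiIndex.from_tuples(
    [(1, 2, 'a', 0), (1, 2, 'a', 1), (1, 1, 'b', 0),
     (1, 1, 'b', 1), (2, 1, 'b', 0), (2, 1, 'b', 1)],
    names=['A', 'B', 'C', 'D'])

ss = s.to_sparse()                      # SparseSeries with a NaN fill value
A, rows, cols = ss.to_coo(row_levels=['A', 'B'],
                          column_levels=['C', 'D'],
                          sort_labels=True)
# A is a scipy.sparse.coo_matrix; round-trip back to a SparseSeries:
ss2 = pd.SparseSeries.from_coo(A)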
philipwfowler/jitter
jitter/core.py
1
2098
#! /usr/bin/env python

import pandas
import numpy


def jitter_data(input_file, sep="\t", x_step=0.5, y_step=0.5, columns=[0, 1],
                x_centre=0, output_file="jittered.dat"):
    '''Jitter a two-column (label, y) dataset so that points falling in the
    same y band are spread symmetrically about x_centre.'''

    # read in the plain text data file as a pandas dataframe
    # (the caller-supplied separator is used for reading as well as writing)
    df = pandas.read_csv(input_file, sep=sep, names=["label", "y"], usecols=columns)

    # # assuming it has a file extension
    # tmp = input_file.split('.')
    # file_stem = tmp[:-1][0]
    # file_ending = tmp[-1]

    # sort the dataset in descending order
    df.sort_values(by=['y'], ascending=False, inplace=True)

    # add an extra column to record the x values
    df['x'] = pandas.Series(numpy.zeros(len(df)), index=df.index)

    # determine the minimum and maximum number of y_steps that encapsulate the data
    ymin = int(df.y.min() / y_step)
    ymax = int(df.y.max() / y_step) + 2

    # now step through the bands of data
    for iy in range(ymin, ymax):

        # create an array of Booleans identifying which points lie in the current band
        points_in_range = (df.y > (iy * y_step)) & (df.y <= (iy + 1) * y_step)

        # count the number of points in the current band
        num_points = numpy.sum(points_in_range)

        if num_points > 1:
            if (num_points % 2) == 0:
                # even number of points: the positive side is e.g. [1, 2]
                a = numpy.arange(1, int(num_points / 2) + 1, 1)
            else:
                # odd number of points: the positive side is e.g. [0, 1, 2]
                a = numpy.arange(0, int(num_points / 2.) + 1, 1)

            # then the negative side, e.g. [-1, -2]
            b = numpy.arange(-1, int(num_points / -2.) - 1, -1)

            # create a new array that can hold both sides...
            c = numpy.empty((a.size + b.size,), dtype=a.dtype)

            # ...and interweave them
            c[0::2] = a
            c[1::2] = b

            df.loc[points_in_range, 'x'] = (c * x_step) + x_centre
        else:
            df.loc[points_in_range, 'x'] = x_centre

    # finally save it all to disc
    df.to_csv(output_file, header=False, index=False, sep=sep)
mit
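A hypothetical driver for jitter_data above: it writes a small tab-separated (label, value) file and jitters it. The input file name, number of samples and step sizes are illustrative choices, and the import path is assumed from the repository layout shown in this entry.

import numpy as np
import pandas as pd
from jitter.core import jitter_data   # module path assumed from jitter/core.py above

# build a small tab-separated input file: label<TAB>value
rng = np.random.default_rng(0)
pd.DataFrame({"label": ["wt"] * 50, "y": rng.normal(size=50)}) \
  .to_csv("input.dat", sep="\t", header=False, index=False)

jitter_data("input.dat", y_step=0.25, x_step=0.1, output_file="jittered.dat")
# jittered.dat now holds three columns: label, y, x
# (x is spread symmetrically about x_centre, 0 by default)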
Cophy08/ggplot
ggplot/scales/scale_facet.py
13
10175
from __future__ import (absolute_import, division, print_function, unicode_literals) # TODO: This is fairly repetiive and can definitely be # condensed into a lot less code, but it's working for now import numpy as np import matplotlib.pyplot as plt from .utils import calc_axis_breaks_and_limits import sys def scale_facet_wrap(rows, cols, positions, scaletype): """Set the scales on each subplot for wrapped faceting. Parameters ---------- rows : int number of rows in the faceted plot cols : int number of columns in the faceted plot positions : list of int zero-indexed list of faceted plot positions scaletype : str or None string indicating the type of scaling to apply to the rows and columns - None : All plots get the same scale - 'free_x' : each plot is free to determine its own x-scale, all plots have the same y-scale - 'free_y' : each plot is free to determine its own y-scale, all plots have the same x-scale - 'free' : plots are free to determine their own x- and y-scales """ x_extents, y_extents = {}, {} # Calculate the extents for the plots for pos in positions: # Work on the subplot at the current position (adding 1 to pos because # matplotlib 1-indexes their subplots) plt.subplot(rows, cols, pos + 1) # Update the x extents for each column column, row = 0, 0 if scaletype in ["free", "free_x"]: # If the x scale is free, all plots get their own x scale column = pos % cols row = int(pos / cols) limits = plt.xlim() # Get the current bounds for this column. Default lower limit is # infinity (because all values < infinity) and the default upper limit # is -infinity (because all values > -infinity). lower, upper = x_extents.get((column, row), (float("inf"), float("-inf"))) lower = min(limits[0], lower) upper = max(limits[1], upper) x_extents[(column, row)] = (lower, upper) column, row = 0, 0 if scaletype in ["free", "free_y"]: # If the y scale is free, all plots get their own y scale column = pos % cols row = int(pos / cols) limits = plt.ylim() # Get the current bounds for this column. Default lower limit is # infinity (because all values < infinity) and the default upper limit # is -infinity (because all values > -infinity). lower, upper = y_extents.get((column, row), (float("inf"), float("-inf"))) lower = min(limits[0], lower) upper = max(limits[1], upper) y_extents[(column, row)] = (lower, upper) for pos in positions: plt.subplot(rows, cols, pos + 1) row = int(pos / cols) column = pos % cols # Find the extents for this position. Default to the extents at # position column 0, row 0, in case all plots use the same scale xmin, xmax = x_extents[(0, 0)] ymin, ymax = y_extents[(0, 0)] if scaletype in ["free", "free_x"]: # If the x scale is free, look up the extents for this column and row xmin, xmax = x_extents[(column, row)] if scaletype in ["free", "free_y"]: # If the y scale is free, look up the extents for this column and row ymin, ymax = y_extents[(column, row)] x_scale, x_min, x_max = calc_axis_breaks_and_limits(xmin, xmax, 4) x_scale = np.round(x_scale, 2) # Only apply x labels to plots if each plot has its own scale or the # plot is in the bottom row of each column. x_labs = [] if scaletype in ["free", "free_x"] or pos in positions[-cols:]: x_labs = x_scale plt.xticks(x_scale, x_labs) plt.xlim(x_min, x_max ) # Set the y-axis scale and labels y_scale, y_min, y_max = calc_axis_breaks_and_limits(ymin, ymax, 4) y_scale = np.round(y_scale, 2) # Only apply y labels to plots if each plot has its own scale or the # plot is in the left column. 
y_labs = [] if scaletype in ["free", "free_y"] or column == 0: y_labs = y_scale plt.yticks(y_scale, y_labs) plt.ylim(y_min, y_max) def scale_facet_grid(xdim, ydim, facet_pairs, scaletype): # everyone gets the same scales if scaletype is None: min_x, max_x = 999999999, -999999999 min_y, max_y = 999999999, -999999999 for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) min_x = min(min_x, min(plt.xlim())) max_x = max(max_x, max(plt.xlim())) min_y = min(min_y, min(plt.ylim())) max_y = max(max_y, max(plt.ylim())) y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4) y_scale = np.round(y_scale, 2) x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4) x_scale = np.round(x_scale, 2) # for all axis set the individual axis limits and ticks for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) y_labs = y_scale if pos % ydim!=1: y_labs = [] plt.yticks(y_scale, y_labs) plt.ylim(y_min, y_max) x_labs = x_scale if pos <= (len(facet_pairs) - ydim): x_labs = [] plt.xticks(x_scale, x_labs) plt.xlim(x_min, x_max) elif scaletype=="free_y": min_x, max_x = 999999999, -999999999 min_ys, max_ys = {}, {} for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) y_bucket = int((pos-1) / ydim) min_ys[y_bucket] = min_ys.get(y_bucket, 999999999) max_ys[y_bucket] = max_ys.get(y_bucket, -999999999) min_x = min(min_x, min(plt.xlim())) max_x = max(max_x, max(plt.xlim())) min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim())) max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim())) for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) y_bucket = int((pos-1) / ydim) y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4) y_scale = np.round(y_scale, 2) y_labs = y_scale if pos % ydim!=1: y_labs = [] plt.yticks(y_scale, y_labs) plt.ylim(y_min, y_max) x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4) x_scale = np.round(x_scale, 2) x_labs = x_scale if pos <= (len(facet_pairs) - ydim): x_labs = [] plt.xticks(x_scale, x_labs) plt.xlim(x_min, x_max) elif scaletype=="free_x": min_y, max_y = 999999999, -999999999 min_xs, max_xs = {}, {} for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) x_bucket = int((pos-1) / xdim) min_xs[x_bucket] = min_xs.get(x_bucket, 999999999) max_xs[x_bucket] = max_xs.get(x_bucket, -999999999) min_y = min(min_y, min(plt.ylim())) max_y = max(max_y, max(plt.ylim())) min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim())) max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim())) for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) x_bucket = int((pos-1) / xdim) x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4) x_scale = np.round(x_scale, 2) x_labs = x_scale if pos <= ((len(facet_pairs) - ydim)): x_labs = [] plt.xticks(x_scale, x_labs) plt.xlim(x_min, x_max) y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4) y_scale = np.round(y_scale, 2) y_labs = y_scale if pos % ydim!=1: y_labs = [] plt.yticks(y_scale, y_labs) plt.ylim(y_min, y_max) else: min_xs, max_xs = {}, {} min_ys, max_ys = {}, {} for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) x_bucket = int((pos-1) / xdim) min_xs[x_bucket] = min_xs.get(x_bucket, 999999999) max_xs[x_bucket] = max_xs.get(x_bucket, -999999999) min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim())) max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim())) y_bucket = int((pos-1) / ydim) 
min_ys[y_bucket] = min_ys.get(y_bucket, 999999999) max_ys[y_bucket] = max_ys.get(y_bucket, -999999999) min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim())) max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim())) for pos, _ in enumerate(facet_pairs): pos += 1 plt.subplot(xdim, ydim, pos) x_bucket = int((pos-1) / xdim) x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4) x_scale = np.round(x_scale, 2) x_labs = x_scale if pos <= ((len(facet_pairs) - ydim)): x_labs = [] plt.xticks(x_scale, x_labs) plt.xlim(x_min, x_max) y_bucket = int((pos-1) / ydim) y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4) y_scale = np.round(y_scale, 2) y_labs = y_scale if pos % ydim!=1: y_labs = [] plt.yticks(y_scale, y_labs) plt.ylim(y_min, y_max)
bsd-2-clause
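A speculative sketch of driving scale_facet_wrap directly, since the module above is normally called internally by the old yhat/ggplot faceting code: plot into a 2x2 grid of matplotlib subplots, then let the helper harmonise (or free) the axis limits. The package import and the 'free_y' choice are assumptions for illustration.

import numpy as np
import matplotlib.pyplot as plt
from ggplot.scales.scale_facet import scale_facet_wrap  # assumes the old ggplot package is installed

rng = np.random.RandomState(0)
rows, cols, positions = 2, 2, [0, 1, 2, 3]
for pos in positions:
    plt.subplot(rows, cols, pos + 1)
    plt.plot(rng.randn(20).cumsum())

# 'free_y': every panel keeps its own y limits, x limits are shared
scale_facet_wrap(rows, cols, positions, scaletype="free_y")
plt.show()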
OshynSong/scikit-learn
sklearn/utils/tests/test_murmurhash.py
261
2836
# Author: Olivier Grisel <[email protected]> # # License: BSD 3 clause import numpy as np from sklearn.externals.six import b, u from sklearn.utils.murmurhash import murmurhash3_32 from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from nose.tools import assert_equal, assert_true def test_mmhash3_int(): assert_equal(murmurhash3_32(3), 847579505) assert_equal(murmurhash3_32(3, seed=0), 847579505) assert_equal(murmurhash3_32(3, seed=42), -1823081949) assert_equal(murmurhash3_32(3, positive=False), 847579505) assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505) assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949) assert_equal(murmurhash3_32(3, positive=True), 847579505) assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505) assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347) def test_mmhash3_int_array(): rng = np.random.RandomState(42) keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32) keys = keys.reshape((3, 2, 1)) for seed in [0, 42]: expected = np.array([murmurhash3_32(int(k), seed) for k in keys.flat]) expected = expected.reshape(keys.shape) assert_array_equal(murmurhash3_32(keys, seed), expected) for seed in [0, 42]: expected = np.array([murmurhash3_32(k, seed, positive=True) for k in keys.flat]) expected = expected.reshape(keys.shape) assert_array_equal(murmurhash3_32(keys, seed, positive=True), expected) def test_mmhash3_bytes(): assert_equal(murmurhash3_32(b('foo'), 0), -156908512) assert_equal(murmurhash3_32(b('foo'), 42), -1322301282) assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784) assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014) def test_mmhash3_unicode(): assert_equal(murmurhash3_32(u('foo'), 0), -156908512) assert_equal(murmurhash3_32(u('foo'), 42), -1322301282) assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784) assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014) def test_no_collision_on_byte_range(): previous_hashes = set() for i in range(100): h = murmurhash3_32(' ' * i, 0) assert_true(h not in previous_hashes, "Found collision on growing empty string") def test_uniform_distribution(): n_bins, n_samples = 10, 100000 bins = np.zeros(n_bins, dtype=np.float) for i in range(n_samples): bins[murmurhash3_32(i, positive=True) % n_bins] += 1 means = bins / n_samples expected = np.ones(n_bins) / n_bins assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
bsd-3-clause
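A small usage sketch of murmurhash3_32, the function exercised by the tests above: hash a token into one of n_buckets, the way a feature hasher would. The token string and bucket count are arbitrary examples.

from sklearn.utils.murmurhash import murmurhash3_32

n_buckets = 2 ** 20
bucket = murmurhash3_32("some_feature=some_value", seed=0, positive=True) % n_buckets
print(bucket)   # deterministic bucket index for this token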
Divergent914/kddcup2015
util.py
1
3334
#!/usr/local/bin/python3 # -*- coding:utf-8 -*- import os import pandas as pd import numpy as np import pickle as pkl import gzip from path_config import (CACHE_PATH, OBJECT_PATH, TEST_DATASET_PATHS, TRAIN_DATASET_PATHS) def cache_path(filename): if not filename.endswith('.pkl') and not filename.endswith('.pklz'): filename += '.pklz' return os.path.join(CACHE_PATH, filename) def dump(obj, path): if path.endswith('.pklz') or path.endswith('.pkl.gz'): with gzip.open(path, 'wb') as f: pkl.dump(obj, f) else: with open(path, 'wb') as f: pkl.dump(obj, f) def fetch(path): if path.endswith('.pklz') or path.endswith('.pkl.gz'): with gzip.open(path, 'rb') as f: data = pkl.load(f) else: with open(path, 'rb') as f: data = pkl.load(f) return data def __cache__(func): def cached_func(path): pkl_path = path + '.pkl' if os.path.exists(pkl_path): data = fetch(pkl_path) else: data = func(path) dump(data, pkl_path) return data return cached_func @__cache__ def load_log(path): """Load log set as pandas DataFrame""" log_set = pd.read_csv(path, parse_dates=['time']) log_set['event'] = log_set['event'].replace('nagivate', 'navigate') return log_set def load_log_train(): """Load training log set as pandas DataFrame""" return load_log(TRAIN_DATASET_PATHS['log']) def load_log_test(): """Load testing log set as pandas DataFrame""" return load_log(TEST_DATASET_PATHS['log']) def load_logs(): """Load all log sets as pandas DataFrame""" return load_log_train().append(load_log_test(), ignore_index=True) @__cache__ def load_enrollment(path): """Load enrollment set as pandas DataFrame""" return pd.read_csv(path) def load_enrollment_train(): """Load training enrollment set as pandas DataFrame""" return load_enrollment(TRAIN_DATASET_PATHS['enrollment']) def load_enrollment_test(): """Load testing enrollment set as pandas DataFrame""" return load_enrollment(TEST_DATASET_PATHS['enrollment']) def load_enrollments(): """Load all enrollment sets as pandas DataFrame""" return load_enrollment_train().append(load_enrollment_test(), ignore_index=True) def load_object(path=OBJECT_PATH): """Load object set as pandas DataFrame""" return pd.read_csv(path, parse_dates=['start'], na_values=['null']) def load_val_y(path=TRAIN_DATASET_PATHS['truth']): """Load enrollment-labels pairs of validation set as numpy ndarray""" return np.loadtxt(path, dtype=np.int, delimiter=',') if __name__ == '__main__': import sys import glob if sys.argv[1] == 'clean': cached_files = glob.glob(cache_path('*.pkl')) cached_files += glob.glob(cache_path('*.pklz')) cached_files += glob.glob(cache_path('*.pkl.gz')) for path in cached_files: os.remove(path) elif sys.argv[1] == 'gzip': cached_files = glob.glob(cache_path('*.pkl')) for path in cached_files: new_path = path + 'z' dump(fetch(path), new_path) os.remove(path)
gpl-2.0
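A hypothetical use of the loaders above; it assumes the KDD Cup 2015 CSVs are present at the locations configured in path_config.py, so the first call builds the gzipped pickle cache and later calls read from it.

from util import load_logs, load_enrollments, load_val_y

logs = load_logs()               # parses the CSVs on first use, then serves the cached pickle
enroll = load_enrollments()
truth = load_val_y()             # ndarray of (enrollment_id, label) pairs

print(logs['event'].value_counts())   # 'nagivate' has already been normalised to 'navigate'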
fergalbyrne/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/lines.py
69
48233
""" This module contains all the 2D line class which can draw with a variety of line styles, markers and colors. """ # TODO: expose cap and join style attrs from __future__ import division import numpy as np from numpy import ma from matplotlib import verbose import artist from artist import Artist from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\ flatten from colors import colorConverter from path import Path from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform from matplotlib import rcParams # special-purpose marker identifiers: (TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN, CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8) # COVERAGE NOTE: Never called internally or from examples def unmasked_index_ranges(mask, compressed = True): warnings.warn("Import this directly from matplotlib.cbook", DeprecationWarning) # Warning added 2008/07/22 from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges return _unmasked_index_ranges(mask, compressed=compressed) def segment_hits(cx, cy, x, y, radius): """ Determine if any line segments are within radius of a point. Returns the list of line segments that are within that radius. """ # Process single points specially if len(x) < 2: res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 ) return res # We need to lop the last element off a lot. xr,yr = x[:-1],y[:-1] # Only look at line segments whose nearest point to C on the line # lies within the segment. dx,dy = x[1:]-xr, y[1:]-yr Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0 u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq candidates = (u>=0) & (u<=1) #if any(candidates): print "candidates",xr[candidates] # Note that there is a little area near one side of each point # which will be near neither segment, and another which will # be near both, depending on the angle of the lines. The # following radius test eliminates these ambiguities. point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2 #if any(point_hits): print "points",xr[candidates] candidates = candidates & ~(point_hits[:-1] | point_hits[1:]) # For those candidates which remain, determine how far they lie away # from the line. px,py = xr+u*dx,yr+u*dy line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2 #if any(line_hits): print "lines",xr[candidates] line_hits = line_hits & candidates points, = point_hits.ravel().nonzero() lines, = line_hits.ravel().nonzero() #print points,lines return np.concatenate((points,lines)) class Line2D(Artist): """ A line - the line can have both a solid linestyle connecting all the vertices, and a marker at each vertex. Additionally, the drawing of the solid line is influenced by the drawstyle, eg one can create "stepped" lines in various styles. """ lineStyles = _lineStyles = { # hidden names deprecated '-' : '_draw_solid', '--' : '_draw_dashed', '-.' : '_draw_dash_dot', ':' : '_draw_dotted', 'None' : '_draw_nothing', ' ' : '_draw_nothing', '' : '_draw_nothing', } _drawStyles_l = { 'default' : '_draw_lines', 'steps-mid' : '_draw_steps_mid', 'steps-pre' : '_draw_steps_pre', 'steps-post' : '_draw_steps_post', } _drawStyles_s = { 'steps' : '_draw_steps_pre', } drawStyles = {} drawStyles.update(_drawStyles_l) drawStyles.update(_drawStyles_s) markers = _markers = { # hidden names deprecated '.' 
: '_draw_point', ',' : '_draw_pixel', 'o' : '_draw_circle', 'v' : '_draw_triangle_down', '^' : '_draw_triangle_up', '<' : '_draw_triangle_left', '>' : '_draw_triangle_right', '1' : '_draw_tri_down', '2' : '_draw_tri_up', '3' : '_draw_tri_left', '4' : '_draw_tri_right', 's' : '_draw_square', 'p' : '_draw_pentagon', '*' : '_draw_star', 'h' : '_draw_hexagon1', 'H' : '_draw_hexagon2', '+' : '_draw_plus', 'x' : '_draw_x', 'D' : '_draw_diamond', 'd' : '_draw_thin_diamond', '|' : '_draw_vline', '_' : '_draw_hline', TICKLEFT : '_draw_tickleft', TICKRIGHT : '_draw_tickright', TICKUP : '_draw_tickup', TICKDOWN : '_draw_tickdown', CARETLEFT : '_draw_caretleft', CARETRIGHT : '_draw_caretright', CARETUP : '_draw_caretup', CARETDOWN : '_draw_caretdown', 'None' : '_draw_nothing', ' ' : '_draw_nothing', '' : '_draw_nothing', } filled_markers = ('o', '^', 'v', '<', '>', 's', 'd', 'D', 'h', 'H', 'p', '*') zorder = 2 validCap = ('butt', 'round', 'projecting') validJoin = ('miter', 'round', 'bevel') def __str__(self): if self._label != "": return "Line2D(%s)"%(self._label) elif hasattr(self, '_x') and len(self._x) > 3: return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\ %(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1]) elif hasattr(self, '_x'): return "Line2D(%s)"\ %(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)])) else: return "Line2D()" def __init__(self, xdata, ydata, linewidth = None, # all Nones default to rc linestyle = None, color = None, marker = None, markersize = None, markeredgewidth = None, markeredgecolor = None, markerfacecolor = None, antialiased = None, dash_capstyle = None, solid_capstyle = None, dash_joinstyle = None, solid_joinstyle = None, pickradius = 5, drawstyle = None, **kwargs ): """ Create a :class:`~matplotlib.lines.Line2D` instance with *x* and *y* data in sequences *xdata*, *ydata*. The kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s See :meth:`set_linestyle` for a decription of the line styles, :meth:`set_marker` for a description of the markers, and :meth:`set_drawstyle` for a description of the draw styles. 
""" Artist.__init__(self) #convert sequences to numpy arrays if not iterable(xdata): raise RuntimeError('xdata must be a sequence') if not iterable(ydata): raise RuntimeError('ydata must be a sequence') if linewidth is None : linewidth=rcParams['lines.linewidth'] if linestyle is None : linestyle=rcParams['lines.linestyle'] if marker is None : marker=rcParams['lines.marker'] if color is None : color=rcParams['lines.color'] if markersize is None : markersize=rcParams['lines.markersize'] if antialiased is None : antialiased=rcParams['lines.antialiased'] if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle'] if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle'] if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle'] if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle'] if drawstyle is None : drawstyle='default' self.set_dash_capstyle(dash_capstyle) self.set_dash_joinstyle(dash_joinstyle) self.set_solid_capstyle(solid_capstyle) self.set_solid_joinstyle(solid_joinstyle) self.set_linestyle(linestyle) self.set_drawstyle(drawstyle) self.set_linewidth(linewidth) self.set_color(color) self.set_marker(marker) self.set_antialiased(antialiased) self.set_markersize(markersize) self._dashSeq = None self.set_markerfacecolor(markerfacecolor) self.set_markeredgecolor(markeredgecolor) self.set_markeredgewidth(markeredgewidth) self._point_size_reduction = 0.5 self.verticalOffset = None # update kwargs before updating data to give the caller a # chance to init axes (and hence unit support) self.update(kwargs) self.pickradius = pickradius if is_numlike(self._picker): self.pickradius = self._picker self._xorig = np.asarray([]) self._yorig = np.asarray([]) self._invalid = True self.set_data(xdata, ydata) def contains(self, mouseevent): """ Test whether the mouse event occurred on the line. The pick radius determines the precision of the location test (usually within five points of the value). Use :meth:`~matplotlib.lines.Line2D.get_pickradius` or :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or modify it. Returns *True* if any values are within the radius along with ``{'ind': pointlist}``, where *pointlist* is the set of points within the radius. TODO: sort returned indices by distance """ if callable(self._contains): return self._contains(self,mouseevent) if not is_numlike(self.pickradius): raise ValueError,"pick radius should be a distance" # Make sure we have data to plot if self._invalid: self.recache() if len(self._xy)==0: return False,{} # Convert points to pixels path, affine = self._transformed_path.get_transformed_path_and_affine() path = affine.transform_path(path) xy = path.vertices xt = xy[:, 0] yt = xy[:, 1] # Convert pick radius from points to pixels if self.figure == None: warning.warn('no figure set when check if mouse is on line') pixels = self.pickradius else: pixels = self.figure.dpi/72. * self.pickradius # Check for collision if self._linestyle in ['None',None]: # If no line, return the nearby point(s) d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2 ind, = np.nonzero(np.less_equal(d, pixels**2)) else: # If line, return the nearby segment(s) ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels) # Debugging message if False and self._label != u'': print "Checking line",self._label,"at",mouseevent.x,mouseevent.y print 'xt', xt print 'yt', yt #print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2. 
print 'ind',ind # Return the point(s) within radius return len(ind)>0,dict(ind=ind) def get_pickradius(self): 'return the pick radius used for containment tests' return self.pickradius def setpickradius(self,d): """Sets the pick radius used for containment tests ACCEPTS: float distance in points """ self.pickradius = d def set_picker(self,p): """Sets the event picker details for the line. ACCEPTS: float distance in points or callable pick function ``fn(artist, event)`` """ if callable(p): self._contains = p else: self.pickradius = p self._picker = p def get_window_extent(self, renderer): bbox = Bbox.unit() bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()), ignore=True) # correct for marker size, if any if self._marker is not None: ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5 bbox = bbox.padded(ms) return bbox def set_axes(self, ax): Artist.set_axes(self, ax) if ax.xaxis is not None: self._xcid = ax.xaxis.callbacks.connect('units', self.recache) if ax.yaxis is not None: self._ycid = ax.yaxis.callbacks.connect('units', self.recache) set_axes.__doc__ = Artist.set_axes.__doc__ def set_data(self, *args): """ Set the x and y data ACCEPTS: 2D array """ if len(args)==1: x, y = args[0] else: x, y = args not_masked = 0 if not ma.isMaskedArray(x): x = np.asarray(x) not_masked += 1 if not ma.isMaskedArray(y): y = np.asarray(y) not_masked += 1 if (not_masked < 2 or (x is not self._xorig and (x.shape != self._xorig.shape or np.any(x != self._xorig))) or (y is not self._yorig and (y.shape != self._yorig.shape or np.any(y != self._yorig)))): self._xorig = x self._yorig = y self._invalid = True def recache(self): #if self.axes is None: print 'recache no axes' #else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig): x = ma.asarray(self.convert_xunits(self._xorig), float) y = ma.asarray(self.convert_yunits(self._yorig), float) x = ma.ravel(x) y = ma.ravel(y) else: x = np.asarray(self.convert_xunits(self._xorig), float) y = np.asarray(self.convert_yunits(self._yorig), float) x = np.ravel(x) y = np.ravel(y) if len(x)==1 and len(y)>1: x = x * np.ones(y.shape, float) if len(y)==1 and len(x)>1: y = y * np.ones(x.shape, float) if len(x) != len(y): raise RuntimeError('xdata and ydata must be the same length') x = x.reshape((len(x), 1)) y = y.reshape((len(y), 1)) if ma.isMaskedArray(x) or ma.isMaskedArray(y): self._xy = ma.concatenate((x, y), 1) else: self._xy = np.concatenate((x, y), 1) self._x = self._xy[:, 0] # just a view self._y = self._xy[:, 1] # just a view # Masked arrays are now handled by the Path class itself self._path = Path(self._xy) self._transformed_path = TransformedPath(self._path, self.get_transform()) self._invalid = False def set_transform(self, t): """ set the Transformation instance used by this artist ACCEPTS: a :class:`matplotlib.transforms.Transform` instance """ Artist.set_transform(self, t) self._invalid = True # self._transformed_path = TransformedPath(self._path, self.get_transform()) def _is_sorted(self, x): "return true if x is sorted" if len(x)<2: return 1 return np.alltrue(x[1:]-x[0:-1]>=0) def draw(self, renderer): if self._invalid: self.recache() renderer.open_group('line2d') if not self._visible: return gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_foreground(self._color) gc.set_antialiased(self._antialiased) gc.set_linewidth(self._linewidth) gc.set_alpha(self._alpha) if self.is_dashed(): cap = self._dashcapstyle join = self._dashjoinstyle else: cap = 
self._solidcapstyle join = self._solidjoinstyle gc.set_joinstyle(join) gc.set_capstyle(cap) gc.set_snap(self.get_snap()) funcname = self._lineStyles.get(self._linestyle, '_draw_nothing') if funcname != '_draw_nothing': tpath, affine = self._transformed_path.get_transformed_path_and_affine() self._lineFunc = getattr(self, funcname) funcname = self.drawStyles.get(self._drawstyle, '_draw_lines') drawFunc = getattr(self, funcname) drawFunc(renderer, gc, tpath, affine.frozen()) if self._marker is not None: gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_foreground(self.get_markeredgecolor()) gc.set_linewidth(self._markeredgewidth) gc.set_alpha(self._alpha) funcname = self._markers.get(self._marker, '_draw_nothing') if funcname != '_draw_nothing': tpath, affine = self._transformed_path.get_transformed_points_and_affine() markerFunc = getattr(self, funcname) markerFunc(renderer, gc, tpath, affine.frozen()) renderer.close_group('line2d') def get_antialiased(self): return self._antialiased def get_color(self): return self._color def get_drawstyle(self): return self._drawstyle def get_linestyle(self): return self._linestyle def get_linewidth(self): return self._linewidth def get_marker(self): return self._marker def get_markeredgecolor(self): if (is_string_like(self._markeredgecolor) and self._markeredgecolor == 'auto'): if self._marker in self.filled_markers: return 'k' else: return self._color else: return self._markeredgecolor return self._markeredgecolor def get_markeredgewidth(self): return self._markeredgewidth def get_markerfacecolor(self): if (self._markerfacecolor is None or (is_string_like(self._markerfacecolor) and self._markerfacecolor.lower()=='none') ): return self._markerfacecolor elif (is_string_like(self._markerfacecolor) and self._markerfacecolor.lower() == 'auto'): return self._color else: return self._markerfacecolor def get_markersize(self): return self._markersize def get_data(self, orig=True): """ Return the xdata, ydata. If *orig* is *True*, return the original data """ return self.get_xdata(orig=orig), self.get_ydata(orig=orig) def get_xdata(self, orig=True): """ Return the xdata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._xorig if self._invalid: self.recache() return self._x def get_ydata(self, orig=True): """ Return the ydata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._yorig if self._invalid: self.recache() return self._y def get_path(self): """ Return the :class:`~matplotlib.path.Path` object associated with this line. """ if self._invalid: self.recache() return self._path def get_xydata(self): """ Return the *xy* data as a Nx2 numpy array. """ if self._invalid: self.recache() return self._xy def set_antialiased(self, b): """ True if line should be drawin with antialiased rendering ACCEPTS: [True | False] """ self._antialiased = b def set_color(self, color): """ Set the color of the line ACCEPTS: any matplotlib color """ self._color = color def set_drawstyle(self, drawstyle): """ Set the drawstyle of the plot 'default' connects the points with lines. The steps variants produce step-plots. 'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility. 
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] """ self._drawstyle = drawstyle def set_linewidth(self, w): """ Set the line width in points ACCEPTS: float value in points """ self._linewidth = w def set_linestyle(self, linestyle): """ Set the linestyle of the line (also accepts drawstyles) ================ ================= linestyle description ================ ================= '-' solid '--' dashed '-.' dash_dot ':' dotted 'None' draw nothing ' ' draw nothing '' draw nothing ================ ================= 'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility. .. seealso:: :meth:`set_drawstyle` ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and any drawstyle in combination with a linestyle, e.g. 'steps--'. """ # handle long drawstyle names before short ones ! for ds in flatten([k.keys() for k in (self._drawStyles_l, self._drawStyles_s)], is_string_like): if linestyle.startswith(ds): self.set_drawstyle(ds) if len(linestyle) > len(ds): linestyle = linestyle[len(ds):] else: linestyle = '-' if linestyle not in self._lineStyles: if linestyle in ls_mapper: linestyle = ls_mapper[linestyle] else: verbose.report('Unrecognized line style %s, %s' % (linestyle, type(linestyle))) if linestyle in [' ','']: linestyle = 'None' self._linestyle = linestyle def set_marker(self, marker): """ Set the line marker ========== ========================== marker description ========== ========================== '.' point ',' pixel 'o' circle 'v' triangle_down '^' triangle_up '<' triangle_left '>' triangle_right '1' tri_down '2' tri_up '3' tri_left '4' tri_right 's' square 'p' pentagon '*' star 'h' hexagon1 'H' hexagon2 '+' plus 'x' x 'D' diamond 'd' thin_diamond '|' vline '_' hline TICKLEFT tickleft TICKRIGHT tickright TICKUP tickup TICKDOWN tickdown CARETLEFT caretleft CARETRIGHT caretright CARETUP caretup CARETDOWN caretdown 'None' nothing ' ' nothing '' nothing ========== ========================== ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4' | '<' | '>' | 'D' | 'H' | '^' | '_' | 'd' | 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|' | TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT | 'None' | ' ' | '' ] """ if marker not in self._markers: verbose.report('Unrecognized marker style %s, %s' % (marker, type(marker))) if marker in [' ','']: marker = 'None' self._marker = marker self._markerFunc = self._markers[marker] def set_markeredgecolor(self, ec): """ Set the marker edge color ACCEPTS: any matplotlib color """ if ec is None : ec = 'auto' self._markeredgecolor = ec def set_markeredgewidth(self, ew): """ Set the marker edge width in points ACCEPTS: float value in points """ if ew is None : ew = rcParams['lines.markeredgewidth'] self._markeredgewidth = ew def set_markerfacecolor(self, fc): """ Set the marker face color ACCEPTS: any matplotlib color """ if fc is None : fc = 'auto' self._markerfacecolor = fc def set_markersize(self, sz): """ Set the marker size in points ACCEPTS: float """ self._markersize = sz def set_xdata(self, x): """ Set the data np.array for x ACCEPTS: 1D array """ x = np.asarray(x) self.set_data(x, self._yorig) def set_ydata(self, y): """ Set the data np.array for y ACCEPTS: 1D array """ y = np.asarray(y) self.set_data(self._xorig, y) def set_dashes(self, seq): """ Set the dash sequence, sequence of dashes with on off ink in points. If seq is empty or if seq = (None, None), the linestyle will be set to solid. 
ACCEPTS: sequence of on/off ink in points """ if seq == (None, None) or len(seq)==0: self.set_linestyle('-') else: self.set_linestyle('--') self._dashSeq = seq # TODO: offset ignored for now def _draw_lines(self, renderer, gc, path, trans): self._lineFunc(renderer, gc, path, trans) def _draw_steps_pre(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices)-1, 2), np.float_) steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0] steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_steps_post(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices)-1, 2), np.float_) steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0] steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_steps_mid(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices), 2), np.float_) steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0]) steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0]) steps[0, 0] = vertices[0, 0] steps[-1, 0] = vertices[-1, 0] steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_nothing(self, *args, **kwargs): pass def _draw_solid(self, renderer, gc, path, trans): gc.set_linestyle('solid') renderer.draw_path(gc, path, trans) def _draw_dashed(self, renderer, gc, path, trans): gc.set_linestyle('dashed') if self._dashSeq is not None: gc.set_dashes(0, self._dashSeq) renderer.draw_path(gc, path, trans) def _draw_dash_dot(self, renderer, gc, path, trans): gc.set_linestyle('dashdot') renderer.draw_path(gc, path, trans) def _draw_dotted(self, renderer, gc, path, trans): gc.set_linestyle('dotted') renderer.draw_path(gc, path, trans) def _draw_point(self, renderer, gc, path, path_trans): w = renderer.points_to_pixels(self._markersize) * \ self._point_size_reduction * 0.5 gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0) rgbFace = self._get_rgb_face() transform = Affine2D().scale(w) renderer.draw_markers( gc, Path.unit_circle(), transform, path, path_trans, rgbFace) _draw_pixel_transform = Affine2D().translate(-0.5, -0.5) def _draw_pixel(self, renderer, gc, path, path_trans): rgbFace = self._get_rgb_face() gc.set_snap(False) renderer.draw_markers(gc, Path.unit_rectangle(), self._draw_pixel_transform, path, path_trans, rgbFace) def _draw_circle(self, renderer, gc, path, path_trans): w = renderer.points_to_pixels(self._markersize) * 0.5 gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0) rgbFace = self._get_rgb_face() transform = Affine2D().scale(w, w) renderer.draw_markers( gc, Path.unit_circle(), transform, path, path_trans, rgbFace) _triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]]) def _draw_triangle_up(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_down(self, renderer, gc, path, path_trans): 
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, -offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_left(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset).rotate_deg(90) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_right(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset).rotate_deg(-90) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_square(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0) side = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5).scale(side) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_diamond(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) side = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_thin_diamond(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5) \ .rotate_deg(45).scale(offset * 0.6, offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_pentagon(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform, path, path_trans, rgbFace) def _draw_star(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() _starpath = Path.unit_regular_star(5, innerCircle=0.381966) renderer.draw_markers(gc, _starpath, transform, path, path_trans, rgbFace) def _draw_hexagon1(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform, path, path_trans, rgbFace) def _draw_hexagon2(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(30) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform, path, path_trans, rgbFace) _line_marker_path = 
Path([[0.0, -1.0], [0.0, 1.0]]) def _draw_vline(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._line_marker_path, transform, path, path_trans) def _draw_hline(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._line_marker_path, transform, path, path_trans) _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]]) def _draw_tickleft(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(-offset, 1.0) renderer.draw_markers(gc, self._tickhoriz_path, marker_transform, path, path_trans) def _draw_tickright(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(offset, 1.0) renderer.draw_markers(gc, self._tickhoriz_path, marker_transform, path, path_trans) _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]]) def _draw_tickup(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(1.0, offset) renderer.draw_markers(gc, self._tickvert_path, marker_transform, path, path_trans) def _draw_tickdown(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(1.0, -offset) renderer.draw_markers(gc, self._tickvert_path, marker_transform, path, path_trans) _plus_path = Path([[-1.0, 0.0], [1.0, 0.0], [0.0, -1.0], [0.0, 1.0]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_plus(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._plus_path, transform, path, path_trans) _tri_path = Path([[0.0, 0.0], [0.0, -1.0], [0.0, 0.0], [0.8, 0.5], [0.0, 0.0], [-0.8, 0.5]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_tri_down(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_up(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(180) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_left(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_right(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) 
offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(270) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]]) def _draw_caretdown(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretup(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(180) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretleft(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(270) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretright(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) _x_path = Path([[-1.0, -1.0], [1.0, 1.0], [-1.0, 1.0], [1.0, -1.0]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_x(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._x_path, transform, path, path_trans) def update_from(self, other): 'copy properties from other to self' Artist.update_from(self, other) self._linestyle = other._linestyle self._linewidth = other._linewidth self._color = other._color self._markersize = other._markersize self._markerfacecolor = other._markerfacecolor self._markeredgecolor = other._markeredgecolor self._markeredgewidth = other._markeredgewidth self._dashSeq = other._dashSeq self._dashcapstyle = other._dashcapstyle self._dashjoinstyle = other._dashjoinstyle self._solidcapstyle = other._solidcapstyle self._solidjoinstyle = other._solidjoinstyle self._linestyle = other._linestyle self._marker = other._marker self._drawstyle = other._drawstyle def _get_rgb_face(self): facecolor = self.get_markerfacecolor() if is_string_like(facecolor) and facecolor.lower()=='none': rgbFace = None else: rgbFace = colorConverter.to_rgb(facecolor) return rgbFace # some aliases.... 
def set_aa(self, val): 'alias for set_antialiased' self.set_antialiased(val) def set_c(self, val): 'alias for set_color' self.set_color(val) def set_ls(self, val): 'alias for set_linestyle' self.set_linestyle(val) def set_lw(self, val): 'alias for set_linewidth' self.set_linewidth(val) def set_mec(self, val): 'alias for set_markeredgecolor' self.set_markeredgecolor(val) def set_mew(self, val): 'alias for set_markeredgewidth' self.set_markeredgewidth(val) def set_mfc(self, val): 'alias for set_markerfacecolor' self.set_markerfacecolor(val) def set_ms(self, val): 'alias for set_markersize' self.set_markersize(val) def get_aa(self): 'alias for get_antialiased' return self.get_antialiased() def get_c(self): 'alias for get_color' return self.get_color() def get_ls(self): 'alias for get_linestyle' return self.get_linestyle() def get_lw(self): 'alias for get_linewidth' return self.get_linewidth() def get_mec(self): 'alias for get_markeredgecolor' return self.get_markeredgecolor() def get_mew(self): 'alias for get_markeredgewidth' return self.get_markeredgewidth() def get_mfc(self): 'alias for get_markerfacecolor' return self.get_markerfacecolor() def get_ms(self): 'alias for get_markersize' return self.get_markersize() def set_dash_joinstyle(self, s): """ Set the join style for dashed linestyles ACCEPTS: ['miter' | 'round' | 'bevel'] """ s = s.lower() if s not in self.validJoin: raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,) + 'valid joinstyles are %s' % (self.validJoin,)) self._dashjoinstyle = s def set_solid_joinstyle(self, s): """ Set the join style for solid linestyles ACCEPTS: ['miter' | 'round' | 'bevel'] """ s = s.lower() if s not in self.validJoin: raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,) + 'valid joinstyles are %s' % (self.validJoin,)) self._solidjoinstyle = s def get_dash_joinstyle(self): """ Get the join style for dashed linestyles """ return self._dashjoinstyle def get_solid_joinstyle(self): """ Get the join style for solid linestyles """ return self._solidjoinstyle def set_dash_capstyle(self, s): """ Set the cap style for dashed linestyles ACCEPTS: ['butt' | 'round' | 'projecting'] """ s = s.lower() if s not in self.validCap: raise ValueError('set_dash_capstyle passed "%s";\n' % (s,) + 'valid capstyles are %s' % (self.validCap,)) self._dashcapstyle = s def set_solid_capstyle(self, s): """ Set the cap style for solid linestyles ACCEPTS: ['butt' | 'round' | 'projecting'] """ s = s.lower() if s not in self.validCap: raise ValueError('set_solid_capstyle passed "%s";\n' % (s,) + 'valid capstyles are %s' % (self.validCap,)) self._solidcapstyle = s def get_dash_capstyle(self): """ Get the cap style for dashed linestyles """ return self._dashcapstyle def get_solid_capstyle(self): """ Get the cap style for solid linestyles """ return self._solidcapstyle def is_dashed(self): 'return True if line is dashstyle' return self._linestyle in ('--', '-.', ':') class VertexSelector: """ Manage the callbacks to maintain a list of selected vertices for :class:`matplotlib.lines.Line2D`. Derived classes should override :meth:`~matplotlib.lines.VertexSelector.process_selected` to do something with the picks. 
Here is an example which highlights the selected verts with red circles:: import numpy as np import matplotlib.pyplot as plt import matplotlib.lines as lines class HighlightSelected(lines.VertexSelector): def __init__(self, line, fmt='ro', **kwargs): lines.VertexSelector.__init__(self, line) self.markers, = self.axes.plot([], [], fmt, **kwargs) def process_selected(self, ind, xs, ys): self.markers.set_data(xs, ys) self.canvas.draw() fig = plt.figure() ax = fig.add_subplot(111) x, y = np.random.rand(2, 30) line, = ax.plot(x, y, 'bs-', picker=5) selector = HighlightSelected(line) plt.show() """ def __init__(self, line): """ Initialize the class with a :class:`matplotlib.lines.Line2D` instance. The line should already be added to some :class:`matplotlib.axes.Axes` instance and should have the picker property set. """ if not hasattr(line, 'axes'): raise RuntimeError('You must first add the line to the Axes') if line.get_picker() is None: raise RuntimeError('You must first set the picker property of the line') self.axes = line.axes self.line = line self.canvas = self.axes.figure.canvas self.cid = self.canvas.mpl_connect('pick_event', self.onpick) self.ind = set() def process_selected(self, ind, xs, ys): """ Default "do nothing" implementation of the :meth:`process_selected` method. *ind* are the indices of the selected vertices. *xs* and *ys* are the coordinates of the selected vertices. """ pass def onpick(self, event): 'When the line is picked, update the set of selected indicies.' if event.artist is not self.line: return for i in event.ind: if i in self.ind: self.ind.remove(i) else: self.ind.add(i) ind = list(self.ind) ind.sort() xdata, ydata = self.line.get_data() self.process_selected(ind, xdata[ind], ydata[ind]) lineStyles = Line2D._lineStyles lineMarkers = Line2D._markers drawStyles = Line2D.drawStyles artist.kwdocd['Line2D'] = artist.kwdoc(Line2D) # You can not set the docstring of an instancemethod, # but you can on the underlying function. Go figure. Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
agpl-3.0
EuroPython/ep-tools
eptools/gspread_utils.py
1
1778
""" Functions to access the data in google drive spreadsheets """ def get_api_key_file(): """ Return the api_key_file path imported from the config.py file""" try: from .config import api_key_file except ImportError: raise ImportError( "Could not find a path to the Google credentials file. " "You can set it up permanently in the config.py file." ) else: return api_key_file def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1): """ Return the content of the spreadsheet in the ws_tab_idx tab of the spreadsheet with doc_key as a pandas DataFrame. Parameters ---------- api_key_file: str Path to the Google API key json file. doc_key: str ws_tab_idx: int Index of the worksheet within the spreadsheet. header: List[str] List of values to assign to the header of the result. start_row: int Row index from where to start collecting the data. Returns ------- content: pandas.DataFrame """ import pandas as pd from eptools.gdrive import get_spreadsheet, worksheet_to_dict spread = get_spreadsheet(api_key_file, doc_key) ws = spread.get_worksheet(ws_tab_idx) ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row) return pd.DataFrame(ws_dict) def find_one_row(substr, df, col_name): """ Return one row from `df`. The returned row has in `col_name` column a value with a sub-string as `substr. Raise KeyError if no row is found. """ for name in df[col_name]: if substr.lower() in name.lower(): return df[df[col_name] == name] raise KeyError("Could not find {} in the " "pandas dataframe.".format(substr))
mit
DonBeo/scikit-learn
sklearn/decomposition/tests/test_factor_analysis.py
222
3055
# Author: Christian Osendorfer <[email protected]> # Alexandre Gramfort <[email protected]> # Licence: BSD3 import numpy as np from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils import ConvergenceWarning from sklearn.decomposition import FactorAnalysis def test_factor_analysis(): # Test FactorAnalysis ability to recover the data covariance structure rng = np.random.RandomState(0) n_samples, n_features, n_components = 20, 5, 3 # Some random settings for the generative model W = rng.randn(n_components, n_features) # latent variables of dim 3, 20 of them h = rng.randn(n_samples, n_components) # using gamma to model different noise variance # per component noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features) # generate observations # wlog, mean is 0 X = np.dot(h, W) + noise assert_raises(ValueError, FactorAnalysis, svd_method='foo') fa_fail = FactorAnalysis() fa_fail.svd_method = 'foo' assert_raises(ValueError, fa_fail.fit, X) fas = [] for method in ['randomized', 'lapack']: fa = FactorAnalysis(n_components=n_components, svd_method=method) fa.fit(X) fas.append(fa) X_t = fa.transform(X) assert_equal(X_t.shape, (n_samples, n_components)) assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum()) assert_almost_equal(fa.score_samples(X).mean(), fa.score(X)) # the log-likelihood should improve over the EM iterations assert_greater(fa.loglike_[-1], fa.loglike_[0], 'Log likelihood did not increase') # Sample Covariance scov = np.cov(X, rowvar=0., bias=1.) # Model Covariance mcov = fa.get_covariance() diff = np.sum(np.abs(scov - mcov)) / W.size assert_less(diff, 0.1, "Mean absolute difference is %f" % diff) fa = FactorAnalysis(n_components=n_components, noise_variance_init=np.ones(n_features)) assert_raises(ValueError, fa.fit, X[:, :2]) f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal fa1, fa2 = fas for attr in ['loglike_', 'components_', 'noise_variance_']: assert_almost_equal(f(fa1, attr), f(fa2, attr)) fa1.max_iter = 1 fa1.verbose = True assert_warns(ConvergenceWarning, fa1.fit, X) # Test get_covariance and get_precision with n_components == n_features # with n_components < n_features and with n_components == 0 for n_components in [0, 2, X.shape[1]]: fa.n_components = n_components fa.fit(X) cov = fa.get_covariance() precision = fa.get_precision() assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
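# A minimal standalone sketch of the property exercised by the test above: the
# covariance implied by a fitted FactorAnalysis model should approximate the sample
# covariance of data drawn from a linear-Gaussian latent model. The sample size and
# random seed below are arbitrary choices, not taken from the test.
import numpy as np
from sklearn.decomposition import FactorAnalysis

rng = np.random.RandomState(42)
n_samples, n_features, n_components = 500, 5, 3

W = rng.randn(n_components, n_features)          # factor loadings
h = rng.randn(n_samples, n_components)           # latent factors
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
X = np.dot(h, W) + noise                         # zero-mean observations

fa = FactorAnalysis(n_components=n_components).fit(X)
sample_cov = np.cov(X, rowvar=0, bias=1)
model_cov = fa.get_covariance()

# Mean absolute deviation between the two covariance estimates; the test above
# requires the analogous quantity to stay below 0.1.
print(np.mean(np.abs(sample_cov - model_cov)))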
bsd-3-clause
ahoyosid/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
249
1095
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-', label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-', label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), 'm-', label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-', label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-', label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), 'y--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
minixalpha/spark
python/pyspark/sql/functions.py
2
81722
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A collections of builtin functions """ import math import sys import functools if sys.version < "3": from itertools import imap as map from pyspark import since, SparkContext from pyspark.rdd import _prepare_for_python_RDD, ignore_unicode_prefix from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.sql.types import StringType, DataType, _parse_datatype_string from pyspark.sql.column import Column, _to_java_column, _to_seq from pyspark.sql.dataframe import DataFrame def _create_function(name, doc=""): """ Create a function for aggregator by name""" def _(col): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col) return Column(jc) _.__name__ = name _.__doc__ = doc return _ def _create_binary_mathfunction(name, doc=""): """ Create a binary mathfunction by name""" def _(col1, col2): sc = SparkContext._active_spark_context # users might write ints for simplicity. This would throw an error on the JVM side. jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1), col2._jc if isinstance(col2, Column) else float(col2)) return Column(jc) _.__name__ = name _.__doc__ = doc return _ def _create_window_function(name, doc=''): """ Create a window function by name """ def _(): sc = SparkContext._active_spark_context jc = getattr(sc._jvm.functions, name)() return Column(jc) _.__name__ = name _.__doc__ = 'Window function: ' + doc return _ _lit_doc = """ Creates a :class:`Column` of literal value. 
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1) [Row(height=5, spark_user=True)] """ _functions = { 'lit': _lit_doc, 'col': 'Returns a :class:`Column` based on the given column name.', 'column': 'Returns a :class:`Column` based on the given column name.', 'asc': 'Returns a sort expression based on the ascending order of the given column name.', 'desc': 'Returns a sort expression based on the descending order of the given column name.', 'upper': 'Converts a string expression to upper case.', 'lower': 'Converts a string expression to upper case.', 'sqrt': 'Computes the square root of the specified float value.', 'abs': 'Computes the absolute value.', 'max': 'Aggregate function: returns the maximum value of the expression in a group.', 'min': 'Aggregate function: returns the minimum value of the expression in a group.', 'count': 'Aggregate function: returns the number of items in a group.', 'sum': 'Aggregate function: returns the sum of all values in the expression.', 'avg': 'Aggregate function: returns the average of the values in a group.', 'mean': 'Aggregate function: returns the average of the values in a group.', 'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.', } _functions_1_4 = { # unary math functions 'acos': 'Computes the cosine inverse of the given value; the returned angle is in the range' + '0.0 through pi.', 'asin': 'Computes the sine inverse of the given value; the returned angle is in the range' + '-pi/2 through pi/2.', 'atan': 'Computes the tangent inverse of the given value; the returned angle is in the range' + '-pi/2 through pi/2', 'cbrt': 'Computes the cube-root of the given value.', 'ceil': 'Computes the ceiling of the given value.', 'cos': """Computes the cosine of the given value. :param col: :class:`DoubleType` column, units in radians.""", 'cosh': 'Computes the hyperbolic cosine of the given value.', 'exp': 'Computes the exponential of the given value.', 'expm1': 'Computes the exponential of the given value minus one.', 'floor': 'Computes the floor of the given value.', 'log': 'Computes the natural logarithm of the given value.', 'log10': 'Computes the logarithm of the given value in Base 10.', 'log1p': 'Computes the natural logarithm of the given value plus one.', 'rint': 'Returns the double value that is closest in value to the argument and' + ' is equal to a mathematical integer.', 'signum': 'Computes the signum of the given value.', 'sin': """Computes the sine of the given value. :param col: :class:`DoubleType` column, units in radians.""", 'sinh': 'Computes the hyperbolic sine of the given value.', 'tan': """Computes the tangent of the given value. :param col: :class:`DoubleType` column, units in radians.""", 'tanh': 'Computes the hyperbolic tangent of the given value.', 'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.', 'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.', 'bitwiseNOT': 'Computes bitwise not.', } _collect_list_doc = """ Aggregate function: returns a list of objects with duplicates. >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',)) >>> df2.agg(collect_list('age')).collect() [Row(collect_list(age)=[2, 5, 5])] """ _collect_set_doc = """ Aggregate function: returns a set of objects with duplicate elements eliminated. 
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',)) >>> df2.agg(collect_set('age')).collect() [Row(collect_set(age)=[5, 2])] """ _functions_1_6 = { # unary math functions 'stddev': 'Aggregate function: returns the unbiased sample standard deviation of' + ' the expression in a group.', 'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' + ' the expression in a group.', 'stddev_pop': 'Aggregate function: returns population standard deviation of' + ' the expression in a group.', 'variance': 'Aggregate function: returns the population variance of the values in a group.', 'var_samp': 'Aggregate function: returns the unbiased variance of the values in a group.', 'var_pop': 'Aggregate function: returns the population variance of the values in a group.', 'skewness': 'Aggregate function: returns the skewness of the values in a group.', 'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.', 'collect_list': _collect_list_doc, 'collect_set': _collect_set_doc } _functions_2_1 = { # unary math functions 'degrees': 'Converts an angle measured in radians to an approximately equivalent angle ' + 'measured in degrees.', 'radians': 'Converts an angle measured in degrees to an approximately equivalent angle ' + 'measured in radians.', } # math functions that take two arguments as input _binary_mathfunctions = { 'atan2': 'Returns the angle theta from the conversion of rectangular coordinates (x, y) to' + 'polar coordinates (r, theta). Units in radians.', 'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.', 'pow': 'Returns the value of the first argument raised to the power of the second argument.', } _window_functions = { 'row_number': """returns a sequential number starting at 1 within a window partition.""", 'dense_rank': """returns the rank of rows within a window partition, without any gaps. The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking sequence when there are ties. That is, if you were ranking a competition using dense_rank and had three people tie for second place, you would say that all three were in second place and that the next person came in third. Rank would give me sequential numbers, making the person that came in third place (after the ties) would register as coming in fifth. This is equivalent to the DENSE_RANK function in SQL.""", 'rank': """returns the rank of rows within a window partition. The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking sequence when there are ties. That is, if you were ranking a competition using dense_rank and had three people tie for second place, you would say that all three were in second place and that the next person came in third. Rank would give me sequential numbers, making the person that came in third place (after the ties) would register as coming in fifth. This is equivalent to the RANK function in SQL.""", 'cume_dist': """returns the cumulative distribution of values within a window partition, i.e. the fraction of rows that are below the current row.""", 'percent_rank': """returns the relative rank (i.e. 
percentile) of rows within a window partition.""", } for _name, _doc in _functions.items(): globals()[_name] = since(1.3)(_create_function(_name, _doc)) for _name, _doc in _functions_1_4.items(): globals()[_name] = since(1.4)(_create_function(_name, _doc)) for _name, _doc in _binary_mathfunctions.items(): globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc)) for _name, _doc in _window_functions.items(): globals()[_name] = since(1.6)(_create_window_function(_name, _doc)) for _name, _doc in _functions_1_6.items(): globals()[_name] = since(1.6)(_create_function(_name, _doc)) for _name, _doc in _functions_2_1.items(): globals()[_name] = since(2.1)(_create_function(_name, _doc)) del _name, _doc @since(1.3) def approxCountDistinct(col, rsd=None): """ .. note:: Deprecated in 2.1, use :func:`approx_count_distinct` instead. """ return approx_count_distinct(col, rsd) @since(2.1) def approx_count_distinct(col, rsd=None): """Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] """ sc = SparkContext._active_spark_context if rsd is None: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col)) else: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd) return Column(jc) @since(1.6) def broadcast(df): """Marks a DataFrame as small enough for use in broadcast joins.""" sc = SparkContext._active_spark_context return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx) @since(1.4) def coalesce(*cols): """Returns the first column that is not null. >>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b")) >>> cDf.show() +----+----+ | a| b| +----+----+ |null|null| | 1|null| |null| 2| +----+----+ >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show() +--------------+ |coalesce(a, b)| +--------------+ | null| | 1| | 2| +--------------+ >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show() +----+----+----------------+ | a| b|coalesce(a, 0.0)| +----+----+----------------+ |null|null| 0.0| | 1|null| 1.0| |null| 2| 0.0| +----+----+----------------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.6) def corr(col1, col2): """Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1`` and ``col2``. >>> a = range(20) >>> b = [2 * x for x in range(20)] >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(corr("a", "b").alias('c')).collect() [Row(c=1.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2))) @since(2.0) def covar_pop(col1, col2): """Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``. >>> a = [1] * 10 >>> b = [1] * 10 >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(covar_pop("a", "b").alias('c')).collect() [Row(c=0.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2))) @since(2.0) def covar_samp(col1, col2): """Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``. 
>>> a = [1] * 10 >>> b = [1] * 10 >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(covar_samp("a", "b").alias('c')).collect() [Row(c=0.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2))) @since(1.3) def countDistinct(col, *cols): """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``. >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect() [Row(c=2)] >>> df.agg(countDistinct("age", "name").alias('c')).collect() [Row(c=2)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.3) def first(col, ignorenulls=False): """Aggregate function: returns the first value in a group. The function by default returns the first values it sees. It will return the first non-null value it sees when ignoreNulls is set to true. If all values are null, then null is returned. """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls) return Column(jc) @since(2.0) def grouping(col): """ Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated or not, returns 1 for aggregated or 0 for not aggregated in the result set. >>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show() +-----+--------------+--------+ | name|grouping(name)|sum(age)| +-----+--------------+--------+ | null| 1| 7| |Alice| 0| 2| | Bob| 0| 5| +-----+--------------+--------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.grouping(_to_java_column(col)) return Column(jc) @since(2.0) def grouping_id(*cols): """ Aggregate function: returns the level of grouping, equals to (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn) .. note:: The list of columns should match with grouping columns exactly, or empty (means all the grouping columns). >>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show() +-----+-------------+--------+ | name|grouping_id()|sum(age)| +-----+-------------+--------+ | null| 1| 7| |Alice| 0| 2| | Bob| 0| 5| +-----+-------------+--------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.6) def input_file_name(): """Creates a string column for the file name of the current Spark task. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.input_file_name()) @since(1.6) def isnan(col): """An expression that returns true iff the column is NaN. >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect() [Row(r1=False, r2=False), Row(r1=True, r2=True)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.isnan(_to_java_column(col))) @since(1.6) def isnull(col): """An expression that returns true iff the column is null. >>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b")) >>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect() [Row(r1=False, r2=False), Row(r1=True, r2=True)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.isnull(_to_java_column(col))) @since(1.3) def last(col, ignorenulls=False): """Aggregate function: returns the last value in a group. The function by default returns the last values it sees. 
It will return the last non-null value it sees when ignoreNulls is set to true. If all values are null, then null is returned. """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls) return Column(jc) @since(1.6) def monotonically_increasing_id(): """A column that generates monotonically increasing 64-bit integers. The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive. The current implementation puts the partition ID in the upper 31 bits, and the record number within each partition in the lower 33 bits. The assumption is that the data frame has less than 1 billion partitions, and each partition has less than 8 billion records. As an example, consider a :class:`DataFrame` with two partitions, each with 3 records. This expression would return the following IDs: 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594. >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1']) >>> df0.select(monotonically_increasing_id().alias('id')).collect() [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.monotonically_increasing_id()) @since(1.6) def nanvl(col1, col2): """Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))) @ignore_unicode_prefix @since(1.4) def rand(seed=None): """Generates a random column with independent and identically distributed (i.i.d.) samples from U[0.0, 1.0]. >>> df.withColumn('rand', rand(seed=42) * 3).collect() [Row(age=2, name=u'Alice', rand=1.1568609015300986), Row(age=5, name=u'Bob', rand=1.403379671529166)] """ sc = SparkContext._active_spark_context if seed is not None: jc = sc._jvm.functions.rand(seed) else: jc = sc._jvm.functions.rand() return Column(jc) @ignore_unicode_prefix @since(1.4) def randn(seed=None): """Generates a column with independent and identically distributed (i.i.d.) samples from the standard normal distribution. >>> df.withColumn('randn', randn(seed=42)).collect() [Row(age=2, name=u'Alice', randn=-0.7556247885860078), Row(age=5, name=u'Bob', randn=-0.0861619008451133)] """ sc = SparkContext._active_spark_context if seed is not None: jc = sc._jvm.functions.randn(seed) else: jc = sc._jvm.functions.randn() return Column(jc) @since(1.5) def round(col, scale=0): """ Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.round(_to_java_column(col), scale)) @since(2.0) def bround(col, scale=0): """ Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0 or at integral part when `scale` < 0. 
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect() [Row(r=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.bround(_to_java_column(col), scale)) @since(1.5) def shiftLeft(col, numBits): """Shift the given value numBits left. >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect() [Row(r=42)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits)) @since(1.5) def shiftRight(col, numBits): """(Signed) shift the given value numBits right. >>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect() [Row(r=21)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits) return Column(jc) @since(1.5) def shiftRightUnsigned(col, numBits): """Unsigned shift the given value numBits right. >>> df = spark.createDataFrame([(-42,)], ['a']) >>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect() [Row(r=9223372036854775787)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits) return Column(jc) @since(1.6) def spark_partition_id(): """A column for partition ID. .. note:: This is indeterministic because it depends on data partitioning and task scheduling. >>> df.repartition(1).select(spark_partition_id().alias("pid")).collect() [Row(pid=0), Row(pid=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.spark_partition_id()) @since(1.5) def expr(str): """Parses the expression string into the column that it represents >>> df.select(expr("length(name)")).collect() [Row(length(name)=5), Row(length(name)=3)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.expr(str)) @ignore_unicode_prefix @since(1.4) def struct(*cols): """Creates a new struct column. :param cols: list of column names (string) or list of :class:`Column` expressions >>> df.select(struct('age', 'name').alias("struct")).collect() [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))] >>> df.select(struct([df.age, df.name]).alias("struct")).collect() [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.5) def greatest(*cols): """ Returns the greatest value of the list of column names, skipping null values. This function takes at least 2 parameters. It will return null iff all parameters are null. >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c']) >>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect() [Row(greatest=4)] """ if len(cols) < 2: raise ValueError("greatest should take at least two columns") sc = SparkContext._active_spark_context return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column))) @since(1.5) def least(*cols): """ Returns the least value of the list of column names, skipping null values. This function takes at least 2 parameters. It will return null iff all parameters are null. 
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c']) >>> df.select(least(df.a, df.b, df.c).alias("least")).collect() [Row(least=1)] """ if len(cols) < 2: raise ValueError("least should take at least two columns") sc = SparkContext._active_spark_context return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column))) @since(1.4) def when(condition, value): """Evaluates a list of conditions and returns one of multiple possible result expressions. If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions. :param condition: a boolean :class:`Column` expression. :param value: a literal value, or a :class:`Column` expression. >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect() [Row(age=3), Row(age=4)] >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect() [Row(age=3), Row(age=None)] """ sc = SparkContext._active_spark_context if not isinstance(condition, Column): raise TypeError("condition should be a Column") v = value._jc if isinstance(value, Column) else value jc = sc._jvm.functions.when(condition._jc, v) return Column(jc) @since(1.5) def log(arg1, arg2=None): """Returns the first argument-based logarithm of the second argument. If there is only one argument, then this takes the natural logarithm of the argument. >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect() ['0.30102', '0.69897'] >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect() ['0.69314', '1.60943'] """ sc = SparkContext._active_spark_context if arg2 is None: jc = sc._jvm.functions.log(_to_java_column(arg1)) else: jc = sc._jvm.functions.log(arg1, _to_java_column(arg2)) return Column(jc) @since(1.5) def log2(col): """Returns the base-2 logarithm of the argument. >>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect() [Row(log2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.log2(_to_java_column(col))) @since(1.5) @ignore_unicode_prefix def conv(col, fromBase, toBase): """ Convert a number in a string column from one base to another. >>> df = spark.createDataFrame([("010101",)], ['n']) >>> df.select(conv(df.n, 2, 16).alias('hex')).collect() [Row(hex=u'15')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase)) @since(1.5) def factorial(col): """ Computes the factorial of the given value. >>> df = spark.createDataFrame([(5,)], ['n']) >>> df.select(factorial(df.n).alias('f')).collect() [Row(f=120)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.factorial(_to_java_column(col))) # --------------- Window functions ------------------------ @since(1.4) def lag(col, count=1, default=None): """ Window function: returns the value that is `offset` rows before the current row, and `defaultValue` if there is less than `offset` rows before the current row. For example, an `offset` of one will return the previous row at any given point in the window partition. This is equivalent to the LAG function in SQL. :param col: name of column or expression :param count: number of row to extend :param default: default value """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lag(_to_java_column(col), count, default)) @since(1.4) def lead(col, count=1, default=None): """ Window function: returns the value that is `offset` rows after the current row, and `defaultValue` if there is less than `offset` rows after the current row. 
For example, an `offset` of one will return the next row at any given point in the window partition. This is equivalent to the LEAD function in SQL. :param col: name of column or expression :param count: number of row to extend :param default: default value """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lead(_to_java_column(col), count, default)) @since(1.4) def ntile(n): """ Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second quarter will get 2, the third quarter will get 3, and the last quarter will get 4. This is equivalent to the NTILE function in SQL. :param n: an integer """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.ntile(int(n))) # ---------------------- Date/Timestamp functions ------------------------------ @since(1.5) def current_date(): """ Returns the current date as a :class:`DateType` column. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.current_date()) def current_timestamp(): """ Returns the current timestamp as a :class:`TimestampType` column. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.current_timestamp()) @ignore_unicode_prefix @since(1.5) def date_format(date, format): """ Converts a date/timestamp/string to a value of string in the format specified by the date format given by the second argument. A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All pattern letters of the Java class `java.text.SimpleDateFormat` can be used. .. note:: Use when ever possible specialized functions like `year`. These benefit from a specialized implementation. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect() [Row(date=u'04/08/2015')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_format(_to_java_column(date), format)) @since(1.5) def year(col): """ Extract the year of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(year('dt').alias('year')).collect() [Row(year=2015)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.year(_to_java_column(col))) @since(1.5) def quarter(col): """ Extract the quarter of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(quarter('dt').alias('quarter')).collect() [Row(quarter=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.quarter(_to_java_column(col))) @since(1.5) def month(col): """ Extract the month of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(month('dt').alias('month')).collect() [Row(month=4)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.month(_to_java_column(col))) @since(1.5) def dayofmonth(col): """ Extract the day of the month of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(dayofmonth('dt').alias('day')).collect() [Row(day=8)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.dayofmonth(_to_java_column(col))) @since(1.5) def dayofyear(col): """ Extract the day of the year of a given date as integer. 
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(dayofyear('dt').alias('day')).collect() [Row(day=98)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.dayofyear(_to_java_column(col))) @since(1.5) def hour(col): """ Extract the hours of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(hour('ts').alias('hour')).collect() [Row(hour=13)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.hour(_to_java_column(col))) @since(1.5) def minute(col): """ Extract the minutes of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(minute('ts').alias('minute')).collect() [Row(minute=8)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.minute(_to_java_column(col))) @since(1.5) def second(col): """ Extract the seconds of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(second('ts').alias('second')).collect() [Row(second=15)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.second(_to_java_column(col))) @since(1.5) def weekofyear(col): """ Extract the week number of a given date as integer. >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(weekofyear(df.dt).alias('week')).collect() [Row(week=15)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.weekofyear(_to_java_column(col))) @since(1.5) def date_add(start, days): """ Returns the date that is `days` days after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_add(df.dt, 1).alias('next_date')).collect() [Row(next_date=datetime.date(2015, 4, 9))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_add(_to_java_column(start), days)) @since(1.5) def date_sub(start, days): """ Returns the date that is `days` days before `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect() [Row(prev_date=datetime.date(2015, 4, 7))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_sub(_to_java_column(start), days)) @since(1.5) def datediff(end, start): """ Returns the number of days from `start` to `end`. >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))) @since(1.5) def add_months(start, months): """ Returns the date that is `months` months after `start` >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(add_months(df.dt, 1).alias('next_month')).collect() [Row(next_month=datetime.date(2015, 5, 8))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.add_months(_to_java_column(start), months)) @since(1.5) def months_between(date1, date2): """ Returns the number of months between date1 and date2. 
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2']) >>> df.select(months_between(df.date1, df.date2).alias('months')).collect() [Row(months=3.9495967...)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.months_between(_to_java_column(date1), _to_java_column(date2))) @since(2.2) def to_date(col, format=None): """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType` using the optionally specified format. Specify formats according to `SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_. By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format is omitted (equivalent to ``col.cast("date")``). >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_date(df.t).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context if format is None: jc = sc._jvm.functions.to_date(_to_java_column(col)) else: jc = sc._jvm.functions.to_date(_to_java_column(col), format) return Column(jc) @since(2.2) def to_timestamp(col, format=None): """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType` using the optionally specified format. Specify formats according to `SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_. By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format is omitted (equivalent to ``col.cast("timestamp")``). >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_timestamp(df.t).alias('dt')).collect() [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))] >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect() [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))] """ sc = SparkContext._active_spark_context if format is None: jc = sc._jvm.functions.to_timestamp(_to_java_column(col)) else: jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format) return Column(jc) @since(1.5) def trunc(date, format): """ Returns date truncated to the unit specified by the format. :param format: 'year', 'YYYY', 'yy' or 'month', 'mon', 'mm' >>> df = spark.createDataFrame([('1997-02-28',)], ['d']) >>> df.select(trunc(df.d, 'year').alias('year')).collect() [Row(year=datetime.date(1997, 1, 1))] >>> df.select(trunc(df.d, 'mon').alias('month')).collect() [Row(month=datetime.date(1997, 2, 1))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.trunc(_to_java_column(date), format)) @since(1.5) def next_day(date, dayOfWeek): """ Returns the first date which is later than the value of the date column. Day of the week parameter is case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". 
>>> df = spark.createDataFrame([('2015-07-27',)], ['d']) >>> df.select(next_day(df.d, 'Sun').alias('date')).collect() [Row(date=datetime.date(2015, 8, 2))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek)) @since(1.5) def last_day(date): """ Returns the last day of the month which the given date belongs to. >>> df = spark.createDataFrame([('1997-02-10',)], ['d']) >>> df.select(last_day(df.d).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.last_day(_to_java_column(date))) @ignore_unicode_prefix @since(1.5) def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"): """ Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string representing the timestamp of that moment in the current system time zone in the given format. >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles") >>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time']) >>> time_df.select(from_unixtime('unix_time').alias('ts')).collect() [Row(ts=u'2015-04-08 00:00:00')] >>> spark.conf.unset("spark.sql.session.timeZone") """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format)) @since(1.5) def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'): """ Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default) to Unix time stamp (in seconds), using the default timezone and the default locale, return null if fail. if `timestamp` is None, then it returns current timestamp. >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles") >>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect() [Row(unix_time=1428476400)] >>> spark.conf.unset("spark.sql.session.timeZone") """ sc = SparkContext._active_spark_context if timestamp is None: return Column(sc._jvm.functions.unix_timestamp()) return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format)) @since(1.5) def from_utc_timestamp(timestamp, tz): """ Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders that time as a timestamp in the given time zone. For example, 'GMT+1' would yield '2017-07-14 03:40:00.0'. >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(from_utc_timestamp(df.t, "PST").alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz)) @since(1.5) def to_utc_timestamp(timestamp, tz): """ Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield '2017-07-14 01:40:00.0'. >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['ts']) >>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect() [Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz)) @since(2.0) @ignore_unicode_prefix def window(timeColumn, windowDuration, slideDuration=None, startTime=None): """Bucketize rows into one or more time windows given a timestamp specifying column. 
Window starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in the order of months are not supported. The time column must be of :class:`pyspark.sql.types.TimestampType`. Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'. If the ``slideDuration`` is not provided, the windows will be tumbling windows. The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`. The output column will be a struct called 'window' by default with the nested columns 'start' and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`. >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val") >>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum")) >>> w.select(w.window.start.cast("string").alias("start"), ... w.window.end.cast("string").alias("end"), "sum").collect() [Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)] """ def check_string_field(field, fieldName): if not field or type(field) is not str: raise TypeError("%s should be provided as a string" % fieldName) sc = SparkContext._active_spark_context time_col = _to_java_column(timeColumn) check_string_field(windowDuration, "windowDuration") if slideDuration and startTime: check_string_field(slideDuration, "slideDuration") check_string_field(startTime, "startTime") res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime) elif slideDuration: check_string_field(slideDuration, "slideDuration") res = sc._jvm.functions.window(time_col, windowDuration, slideDuration) elif startTime: check_string_field(startTime, "startTime") res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime) else: res = sc._jvm.functions.window(time_col, windowDuration) return Column(res) # ---------------------------- misc functions ---------------------------------- @since(1.5) @ignore_unicode_prefix def crc32(col): """ Calculates the cyclic redundancy check value (CRC32) of a binary column and returns the value as a bigint. >>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect() [Row(crc32=2743272264)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.crc32(_to_java_column(col))) @ignore_unicode_prefix @since(1.5) def md5(col): """Calculates the MD5 digest and returns the value as a 32 character hex string. >>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect() [Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.md5(_to_java_column(col)) return Column(jc) @ignore_unicode_prefix @since(1.5) def sha1(col): """Returns the hex string result of SHA-1. 
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect() [Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.sha1(_to_java_column(col)) return Column(jc) @ignore_unicode_prefix @since(1.5) def sha2(col, numBits): """Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384, and SHA-512). The numBits indicates the desired bit length of the result, which must have a value of 224, 256, 384, 512, or 0 (which is equivalent to 256). >>> digests = df.select(sha2(df.name, 256).alias('s')).collect() >>> digests[0] Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043') >>> digests[1] Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961') """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.sha2(_to_java_column(col), numBits) return Column(jc) @since(2.0) def hash(*cols): """Calculates the hash code of given columns, and returns the result as an int column. >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column)) return Column(jc) # ---------------------- String/Binary functions ------------------------------ _string_functions = { 'ascii': 'Computes the numeric value of the first character of the string column.', 'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.', 'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.', 'initcap': 'Returns a new string column by converting the first letter of each word to ' + 'uppercase. Words are delimited by whitespace.', 'lower': 'Converts a string column to lower case.', 'upper': 'Converts a string column to upper case.', 'reverse': 'Reverses the string column and returns it as a new string column.', 'ltrim': 'Trim the spaces from left end for the specified string value.', 'rtrim': 'Trim the spaces from right end for the specified string value.', 'trim': 'Trim the spaces from both ends for the specified string column.', } for _name, _doc in _string_functions.items(): globals()[_name] = since(1.5)(_create_function(_name, _doc)) del _name, _doc @since(1.5) @ignore_unicode_prefix def concat(*cols): """ Concatenates multiple input string columns together into a single string column. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat(df.s, df.d).alias('s')).collect() [Row(s=u'abcd123')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column))) @since(1.5) @ignore_unicode_prefix def concat_ws(sep, *cols): """ Concatenates multiple input string columns together into a single string column, using the given separator. >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect() [Row(s=u'abcd-123')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column))) @since(1.5) def decode(col, charset): """ Computes the first argument into a string from a binary using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). 
""" sc = SparkContext._active_spark_context return Column(sc._jvm.functions.decode(_to_java_column(col), charset)) @since(1.5) def encode(col, charset): """ Computes the first argument into a binary from a string using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.encode(_to_java_column(col), charset)) @ignore_unicode_prefix @since(1.5) def format_number(col, d): """ Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places with HALF_EVEN round mode, and returns the result as a string. :param col: the column name of the numeric value to be formatted :param d: the N decimal places >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect() [Row(v=u'5.0000')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_number(_to_java_column(col), d)) @ignore_unicode_prefix @since(1.5) def format_string(format, *cols): """ Formats the arguments in printf-style and returns the result as a string column. :param col: the column name of the numeric value to be formatted :param d: the N decimal places >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v=u'5 hello')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column))) @since(1.5) def instr(str, substr): """ Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.instr(_to_java_column(str), substr)) @since(1.5) @ignore_unicode_prefix def substring(str, pos, len): """ Substring starts at `pos` and is of length `len` when str is String type or returns the slice of byte array that starts at `pos` in byte and is of length `len` when str is Binary type. .. note:: The position is not zero based, but 1 based index. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(substring(df.s, 1, 2).alias('s')).collect() [Row(s=u'ab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len)) @since(1.5) @ignore_unicode_prefix def substring_index(str, delim, count): """ Returns the substring from string str before count occurrences of the delimiter delim. If count is positive, everything the left of the final delimiter (counting from left) is returned. If count is negative, every to the right of the final delimiter (counting from the right) is returned. substring_index performs a case-sensitive match when searching for delim. >>> df = spark.createDataFrame([('a.b.c.d',)], ['s']) >>> df.select(substring_index(df.s, '.', 2).alias('s')).collect() [Row(s=u'a.b')] >>> df.select(substring_index(df.s, '.', -3).alias('s')).collect() [Row(s=u'b.c.d')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count)) @ignore_unicode_prefix @since(1.5) def levenshtein(left, right): """Computes the Levenshtein distance of the two given strings. 
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r']) >>> df0.select(levenshtein('l', 'r').alias('d')).collect() [Row(d=3)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)) return Column(jc) @since(1.5) def locate(substr, str, pos=1): """ Locate the position of the first occurrence of substr in a string column, after position pos. .. note:: The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. :param substr: a string :param str: a Column of :class:`pyspark.sql.types.StringType` :param pos: start position (zero based) >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(locate('b', df.s, 1).alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos)) @since(1.5) @ignore_unicode_prefix def lpad(col, len, pad): """ Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad)) @since(1.5) @ignore_unicode_prefix def rpad(col, len, pad): """ Right-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(rpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'abcd##')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad)) @since(1.5) @ignore_unicode_prefix def repeat(col, n): """ Repeats a string column n times, and returns it as a new string column. >>> df = spark.createDataFrame([('ab',)], ['s',]) >>> df.select(repeat(df.s, 3).alias('s')).collect() [Row(s=u'ababab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.repeat(_to_java_column(col), n)) @since(1.5) @ignore_unicode_prefix def split(str, pattern): """ Splits str around pattern (pattern is a regular expression). .. note:: pattern is a string represent the regular expression. >>> df = spark.createDataFrame([('ab12cd',)], ['s',]) >>> df.select(split(df.s, '[0-9]+').alias('s')).collect() [Row(s=[u'ab', u'cd'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.split(_to_java_column(str), pattern)) @ignore_unicode_prefix @since(1.5) def regexp_extract(str, pattern, idx): """Extract a specific group matched by a Java regex, from the specified string column. If the regex did not match, or the specified group did not match, an empty string is returned. >>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_extract('str', '(\d+)-(\d+)', 1).alias('d')).collect() [Row(d=u'100')] >>> df = spark.createDataFrame([('foo',)], ['str']) >>> df.select(regexp_extract('str', '(\d+)', 1).alias('d')).collect() [Row(d=u'')] >>> df = spark.createDataFrame([('aaaac',)], ['str']) >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect() [Row(d=u'')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx) return Column(jc) @ignore_unicode_prefix @since(1.5) def regexp_replace(str, pattern, replacement): """Replace all substrings of the specified string value that match regexp with rep. 
>>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_replace('str', '(\\d+)', '--').alias('d')).collect() [Row(d=u'-----')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement) return Column(jc) @ignore_unicode_prefix @since(1.5) def initcap(col): """Translate the first letter of each word to upper case in the sentence. >>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect() [Row(v=u'Ab Cd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.initcap(_to_java_column(col))) @since(1.5) @ignore_unicode_prefix def soundex(col): """ Returns the SoundEx encoding for a string >>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name']) >>> df.select(soundex(df.name).alias("soundex")).collect() [Row(soundex=u'P362'), Row(soundex=u'U612')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.soundex(_to_java_column(col))) @ignore_unicode_prefix @since(1.5) def bin(col): """Returns the string representation of the binary value of the given column. >>> df.select(bin(df.age).alias('c')).collect() [Row(c=u'10'), Row(c=u'101')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.bin(_to_java_column(col)) return Column(jc) @ignore_unicode_prefix @since(1.5) def hex(col): """Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`, :class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or :class:`pyspark.sql.types.LongType`. >>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect() [Row(hex(a)=u'414243', hex(b)=u'3')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hex(_to_java_column(col)) return Column(jc) @ignore_unicode_prefix @since(1.5) def unhex(col): """Inverse of hex. Interprets each pair of characters as a hexadecimal number and converts to the byte representation of number. >>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect() [Row(unhex(a)=bytearray(b'ABC'))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.unhex(_to_java_column(col))) @ignore_unicode_prefix @since(1.5) def length(col): """Calculates the length of a string or binary expression. >>> spark.createDataFrame([('ABC',)], ['a']).select(length('a').alias('length')).collect() [Row(length=3)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.length(_to_java_column(col))) @ignore_unicode_prefix @since(1.5) def translate(srcCol, matching, replace): """A function translate any character in the `srcCol` by a character in `matching`. The characters in `replace` is corresponding to the characters in `matching`. The translate will happen when any character in the string matching with the character in the `matching`. >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\ ... .alias('r')).collect() [Row(r=u'1a2s3ae')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace)) # ---------------------- Collection functions ------------------------------ @ignore_unicode_prefix @since(2.0) def create_map(*cols): """Creates a new map column. :param cols: list of column names (string) or list of :class:`Column` expressions that grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...). 
>>> df.select(create_map('name', 'age').alias("map")).collect() [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})] >>> df.select(create_map([df.name, df.age]).alias("map")).collect() [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.4) def array(*cols): """Creates a new array column. :param cols: list of column names (string) or list of :class:`Column` expressions that have the same data type. >>> df.select(array('age', 'age').alias("arr")).collect() [Row(arr=[2, 2]), Row(arr=[5, 5])] >>> df.select(array([df.age, df.age]).alias("arr")).collect() [Row(arr=[2, 2]), Row(arr=[5, 5])] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.5) def array_contains(col, value): """ Collection function: returns null if the array is null, true if the array contains the given value, and false otherwise. :param col: name of column containing array :param value: value to check for in array >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data']) >>> df.select(array_contains(df.data, "a")).collect() [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_contains(_to_java_column(col), value)) @since(1.4) def explode(col): """Returns a new row for each element in the given array or map. >>> from pyspark.sql import Row >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})]) >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect() [Row(anInt=1), Row(anInt=2), Row(anInt=3)] >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show() +---+-----+ |key|value| +---+-----+ | a| b| +---+-----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.explode(_to_java_column(col)) return Column(jc) @since(2.1) def posexplode(col): """Returns a new row for each element with position in the given array or map. >>> from pyspark.sql import Row >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})]) >>> eDF.select(posexplode(eDF.intlist)).collect() [Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)] >>> eDF.select(posexplode(eDF.mapfield)).show() +---+---+-----+ |pos|key|value| +---+---+-----+ | 0| a| b| +---+---+-----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.posexplode(_to_java_column(col)) return Column(jc) @since(2.3) def explode_outer(col): """Returns a new row for each element in the given array or map. Unlike explode, if the array/map is null or empty then null is produced. >>> df = spark.createDataFrame( ... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)], ... ("id", "an_array", "a_map") ... 
) >>> df.select("id", "an_array", explode_outer("a_map")).show() +---+----------+----+-----+ | id| an_array| key|value| +---+----------+----+-----+ | 1|[foo, bar]| x| 1.0| | 2| []|null| null| | 3| null|null| null| +---+----------+----+-----+ >>> df.select("id", "a_map", explode_outer("an_array")).show() +---+-------------+----+ | id| a_map| col| +---+-------------+----+ | 1|Map(x -> 1.0)| foo| | 1|Map(x -> 1.0)| bar| | 2| Map()|null| | 3| null|null| +---+-------------+----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.explode_outer(_to_java_column(col)) return Column(jc) @since(2.3) def posexplode_outer(col): """Returns a new row for each element with position in the given array or map. Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced. >>> df = spark.createDataFrame( ... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)], ... ("id", "an_array", "a_map") ... ) >>> df.select("id", "an_array", posexplode_outer("a_map")).show() +---+----------+----+----+-----+ | id| an_array| pos| key|value| +---+----------+----+----+-----+ | 1|[foo, bar]| 0| x| 1.0| | 2| []|null|null| null| | 3| null|null|null| null| +---+----------+----+----+-----+ >>> df.select("id", "a_map", posexplode_outer("an_array")).show() +---+-------------+----+----+ | id| a_map| pos| col| +---+-------------+----+----+ | 1|Map(x -> 1.0)| 0| foo| | 1|Map(x -> 1.0)| 1| bar| | 2| Map()|null|null| | 3| null|null|null| +---+-------------+----+----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.posexplode_outer(_to_java_column(col)) return Column(jc) @ignore_unicode_prefix @since(1.6) def get_json_object(col, path): """ Extracts json object from a json string based on json path specified, and returns json string of the extracted json object. It will return null if the input json string is invalid. :param col: string column in json format :param path: path to the json object to extract >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')] >>> df = spark.createDataFrame(data, ("key", "jstring")) >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\ ... get_json_object(df.jstring, '$.f2').alias("c1") ).collect() [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.get_json_object(_to_java_column(col), path) return Column(jc) @ignore_unicode_prefix @since(1.6) def json_tuple(col, *fields): """Creates a new row for a json column according to the given field names. :param col: string column in json format :param fields: list of fields to extract >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')] >>> df = spark.createDataFrame(data, ("key", "jstring")) >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect() [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields)) return Column(jc) @since(2.1) def from_json(col, schema, options={}): """ Parses a column containing a JSON string into a :class:`StructType` or :class:`ArrayType` of :class:`StructType`\\s with the specified schema. Returns `null`, in the case of an unparseable string. :param col: string column in json format :param schema: a StructType or ArrayType of StructType to use when parsing the json column. :param options: options to control parsing. 
accepts the same options as the json datasource .. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also supported for ``schema``. >>> from pyspark.sql.types import * >>> data = [(1, '''{"a": 1}''')] >>> schema = StructType([StructField("a", IntegerType())]) >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=Row(a=1))] >>> df.select(from_json(df.value, "a INT").alias("json")).collect() [Row(json=Row(a=1))] >>> data = [(1, '''[{"a": 1}]''')] >>> schema = ArrayType(StructType([StructField("a", IntegerType())])) >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=[Row(a=1)])] """ sc = SparkContext._active_spark_context if isinstance(schema, DataType): schema = schema.json() jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options) return Column(jc) @ignore_unicode_prefix @since(2.1) def to_json(col, options={}): """ Converts a column containing a :class:`StructType`, :class:`ArrayType` of :class:`StructType`\\s, a :class:`MapType` or :class:`ArrayType` of :class:`MapType`\\s into a JSON string. Throws an exception, in the case of an unsupported type. :param col: name of column containing the struct, array of the structs, the map or array of the maps. :param options: options to control converting. accepts the same options as the json datasource >>> from pyspark.sql import Row >>> from pyspark.sql.types import * >>> data = [(1, Row(name='Alice', age=2))] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json=u'{"age":2,"name":"Alice"}')] >>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')] >>> data = [(1, {"name": "Alice"})] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json=u'{"name":"Alice"}')] >>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.to_json(_to_java_column(col), options) return Column(jc) @since(1.5) def size(col): """ Collection function: returns the length of the array or map stored in the column. :param col: name of column or expression >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data']) >>> df.select(size(df.data)).collect() [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.size(_to_java_column(col))) @since(1.5) def sort_array(col, asc=True): """ Collection function: sorts the input array in ascending or descending order according to the natural ordering of the array elements. 
:param col: name of column or expression >>> df = spark.createDataFrame([([2, 1, 3],),([1],),([],)], ['data']) >>> df.select(sort_array(df.data).alias('r')).collect() [Row(r=[1, 2, 3]), Row(r=[1]), Row(r=[])] >>> df.select(sort_array(df.data, asc=False).alias('r')).collect() [Row(r=[3, 2, 1]), Row(r=[1]), Row(r=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc)) @since(2.3) def map_keys(col): """ Collection function: Returns an unordered array containing the keys of the map. :param col: name of column or expression >>> from pyspark.sql.functions import map_keys >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data") >>> df.select(map_keys("data").alias("keys")).show() +------+ | keys| +------+ |[1, 2]| +------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_keys(_to_java_column(col))) @since(2.3) def map_values(col): """ Collection function: Returns an unordered array containing the values of the map. :param col: name of column or expression >>> from pyspark.sql.functions import map_values >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data") >>> df.select(map_values("data").alias("values")).show() +------+ |values| +------+ |[a, b]| +------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_values(_to_java_column(col))) # ---------------------------- User Defined Function ---------------------------------- def _wrap_function(sc, func, returnType): command = (func, returnType) pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command) return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec, sc.pythonVer, broadcast_vars, sc._javaAccumulator) class UserDefinedFunction(object): """ User defined function in Python .. versionadded:: 1.3 """ def __init__(self, func, returnType, name=None, vectorized=False): if not callable(func): raise TypeError( "Not a function or callable (__call__ is not defined): " "{0}".format(type(func))) self.func = func self._returnType = returnType # Stores UserDefinedPythonFunctions jobj, once initialized self._returnType_placeholder = None self._judf_placeholder = None self._name = name or ( func.__name__ if hasattr(func, '__name__') else func.__class__.__name__) self._vectorized = vectorized @property def returnType(self): # This makes sure this is called after SparkContext is initialized. # ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string. if self._returnType_placeholder is None: if isinstance(self._returnType, DataType): self._returnType_placeholder = self._returnType else: self._returnType_placeholder = _parse_datatype_string(self._returnType) return self._returnType_placeholder @property def _judf(self): # It is possible that concurrent access, to newly created UDF, # will initialize multiple UserDefinedPythonFunctions. # This is unlikely, doesn't affect correctness, # and should have a minimal performance impact. 
if self._judf_placeholder is None: self._judf_placeholder = self._create_judf() return self._judf_placeholder def _create_judf(self): from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() sc = spark.sparkContext wrapped_func = _wrap_function(sc, self.func, self.returnType) jdt = spark._jsparkSession.parseDataType(self.returnType.json()) judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction( self._name, wrapped_func, jdt, self._vectorized) return judf def __call__(self, *cols): judf = self._judf sc = SparkContext._active_spark_context return Column(judf.apply(_to_seq(sc, cols, _to_java_column))) def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """ # It is possible for a callable instance without __name__ attribute or/and # __module__ attribute to be wrapped here. For example, functools.partial. In this case, # we should avoid wrapping the attributes from the wrapped function to the wrapper # function. So, we take out these attribute names from the default names to set and # then manually assign it after being wrapped. assignments = tuple( a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__') @functools.wraps(self.func, assigned=assignments) def wrapper(*args): return self(*args) wrapper.__name__ = self._name wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__') else self.func.__class__.__module__) wrapper.func = self.func wrapper.returnType = self.returnType return wrapper def _create_udf(f, returnType, vectorized): def _udf(f, returnType=StringType(), vectorized=vectorized): if vectorized: import inspect if len(inspect.getargspec(f).args) == 0: raise NotImplementedError("0-parameter pandas_udfs are not currently supported") udf_obj = UserDefinedFunction(f, returnType, vectorized=vectorized) return udf_obj._wrapped() # decorator @udf, @udf(), @udf(dataType()), or similar with @pandas_udf if f is None or isinstance(f, (str, DataType)): # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType return functools.partial(_udf, returnType=return_type, vectorized=vectorized) else: return _udf(f=f, returnType=returnType, vectorized=vectorized) @since(1.3) def udf(f=None, returnType=StringType()): """Creates a :class:`Column` expression representing a user defined function (UDF). .. note:: The user-defined functions must be deterministic. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. :param f: python function if used as a standalone function :param returnType: a :class:`pyspark.sql.types.DataType` object >>> from pyspark.sql.types import IntegerType >>> slen = udf(lambda s: len(s), IntegerType()) >>> @udf ... def to_upper(s): ... if s is not None: ... return s.upper() ... >>> @udf(returnType=IntegerType()) ... def add_one(x): ... if x is not None: ... return x + 1 ... 
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show() +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ """ return _create_udf(f, returnType=returnType, vectorized=False) @since(2.3) def pandas_udf(f=None, returnType=StringType()): """ Creates a :class:`Column` expression representing a user defined function (UDF) that accepts `Pandas.Series` as input arguments and outputs a `Pandas.Series` of the same length. :param f: python function if used as a standalone function :param returnType: a :class:`pyspark.sql.types.DataType` object >>> from pyspark.sql.types import IntegerType, StringType >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) >>> @pandas_udf(returnType=StringType()) ... def to_upper(s): ... return s.str.upper() ... >>> @pandas_udf(returnType="integer") ... def add_one(x): ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\ ... .show() # doctest: +SKIP +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ """ return _create_udf(f, returnType=returnType, vectorized=True) blacklist = ['map', 'since', 'ignore_unicode_prefix'] __all__ = [k for k, v in globals().items() if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist] __all__.sort() def _test(): import doctest from pyspark.sql import Row, SparkSession import pyspark.sql.functions globs = pyspark.sql.functions.__dict__.copy() spark = SparkSession.builder\ .master("local[4]")\ .appName("sql.functions tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)]) (failure_count, test_count) = doctest.testmod( pyspark.sql.functions, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) spark.stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
apache-2.0
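# A minimal sketch, again assuming a local pyspark installation, of the udf decorator and
# the from_json helper documented in the module above: a plain Python function is wrapped
# as a column expression, and a JSON string column is parsed against an explicit schema.
# Column names and the sample payload are made up for illustration.
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, from_json
from pyspark.sql.types import IntegerType, StructType, StructField

spark = SparkSession.builder.master("local[2]").appName("udf-json-demo").getOrCreate()

@udf(returnType=IntegerType())
def payload_length(s):
    # runs as plain Python per row; return None for null inputs
    return len(s) if s is not None else None

schema = StructType([StructField("a", IntegerType())])
df = spark.createDataFrame([('{"a": 1}',)], ["payload"])
df.select(
    payload_length("payload").alias("n_chars"),      # 8
    from_json("payload", schema).alias("parsed"),    # Row(a=1)
).show()
spark.stop()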
thebotmechanic/gew_analysis_tool
heatmap_gen/getPolar_fin.py
1
2372
#!/usr/bin/python import sys from pylab import * from scipy.interpolate import griddata import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np def getQuad(theta, i): # get quadrand th_bins = 20 # since 4 quadrants x 5 states th_boundary = np.linspace(0, 2 * np.pi, th_bins+1) hist, edge = np.histogram(theta, th_boundary) quad = int( np.dot(hist,np.linspace(1, th_bins, th_bins)) ) # get quadrand i_bins = 6 # i_boundary = np.linspace(0, 100, i_bins+1) # print i_boundary hist, edge = np.histogram(i, i_boundary) val = int( np.dot(hist,np.linspace(1, i_bins, i_bins)) ) #print val return (quad+th_bins*(val-1)) # function to lookup saved .csv file def lookup2(i): return csv_doc[i][1] ############ MAIN FUNCTION #################### if __name__ == "__main__": # read .csv file # must be passed when program is called try: csvName = sys.argv[1] csvAddress = 'csv/' csvAddress += str(csvName) csvAddress += '.csv' saveName = sys.argv[1] saveAddress = 'figures/' + str(saveName) saveAddress += '.png' except: print 'need to pass name of a .csv file locates in csv folder' if (len(sys.argv) > 2): saveName = sys.argv[2] saveAddress = 'figures/' + str(saveName) saveAddress += '.png' print str(saveName) # define home directory where csv files are located csv_doc = genfromtxt(str(csvAddress),delimiter=',') # setup figure fig = plt.figure() ax = Axes3D(fig) # setup polar grid rad = np.linspace(0, 100, 100) # 0-100, 100 interval lines azm = np.linspace(0, 2 * np.pi, 21) # 0-2pi, 21 interval lines r, th = np.meshgrid(rad, azm) # get 2D mesh of radial and angular coordinates # initialise variable to store freq data z = (r * 0) +1 # print len(z) # will equal 21 # print len(z[0]) # will equal 100 # fill 'z' with freq info # cycle through each slice in circle for i in range(len(z)): # start in centre, move out radially for j in range (len(z[i])): val = getQuad(th[i][j], r[i][j]) # return index for emotions #print val z[i][j] = lookup2(val-1) plt.subplot(projection="polar") # see http://matplotlib.org/examples/color/colormaps_reference.html plt.pcolormesh(th, r, z, cmap='Greys') #plt.pcolormesh(th, z, r) plt.plot(azm, r, color='k', ls='none') plt.grid() plt.savefig(saveAddress) if (len(sys.argv) > 3): plt.show()
mit
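# A self-contained sketch of the polar-heatmap construction used in getPolar_fin.py above,
# with synthetic frequencies standing in for the CSV lookup table.  The 20-angular-by-6-radial
# binning mirrors getQuad(); the bin mapping and output file name here are my own
# simplification for illustration.
import numpy as np
import matplotlib
matplotlib.use('Agg')                         # render to file, as in the original script
import matplotlib.pyplot as plt

rad = np.linspace(0, 100, 100)                # radial axis, 0-100
azm = np.linspace(0, 2 * np.pi, 21)           # 20 angular slices
r, th = np.meshgrid(rad, azm)

# 20 angular bins x 6 radial bins -> 120 cells of made-up frequency counts
freq = np.random.RandomState(0).rand(120)
ang_bin = np.minimum((th / (2 * np.pi) * 20).astype(int), 19)
rad_bin = np.minimum((r / 100.0 * 6).astype(int), 5)
z = freq[ang_bin + 20 * rad_bin]              # one frequency per (angle, radius) cell

ax = plt.subplot(projection="polar")
ax.pcolormesh(th, r, z, cmap='Greys')
ax.grid(True)
plt.savefig('example_polar_heatmap.png')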
BiaDarkia/scikit-learn
sklearn/utils/validation.py
3
32574
"""Utilities for input validation""" # Authors: Olivier Grisel # Gael Varoquaux # Andreas Mueller # Lars Buitinck # Alexandre Gramfort # Nicolas Tresegnie # License: BSD 3 clause import warnings import numbers import numpy as np import scipy.sparse as sp from numpy.core.numeric import ComplexWarning from ..externals import six from ..utils.fixes import signature from .. import get_config as _get_config from ..exceptions import NonBLASDotWarning from ..exceptions import NotFittedError from ..exceptions import DataConversionWarning from ..externals.joblib import Memory FLOAT_DTYPES = (np.float64, np.float32, np.float16) # Silenced by default to reduce verbosity. Turn on at runtime for # performance profiling. warnings.simplefilter('ignore', NonBLASDotWarning) def _assert_all_finite(X, allow_nan=False): """Like assert_all_finite, but only for ndarray.""" if _get_config()['assume_finite']: return X = np.asanyarray(X) # First try an O(n) time, O(1) space solution for the common case that # everything is finite; fall back to O(n) space np.isfinite to prevent # false positives from overflow in sum method. is_float = X.dtype.kind in 'fc' if is_float and np.isfinite(X.sum()): pass elif is_float: msg_err = "Input contains {} or a value too large for {!r}." if (allow_nan and np.isinf(X).any() or not allow_nan and not np.isfinite(X).all()): type_err = 'infinity' if allow_nan else 'NaN, infinity' raise ValueError(msg_err.format(type_err, X.dtype)) def assert_all_finite(X, allow_nan=False): """Throw a ValueError if X contains NaN or infinity. Parameters ---------- X : array or sparse matrix allow_nan : bool """ _assert_all_finite(X.data if sp.issparse(X) else X, allow_nan) def as_float_array(X, copy=True, force_all_finite=True): """Converts an array-like to an array of floats. The new dtype will be np.float32 or np.float64, depending on the original type. The function can create a copy or modify the argument depending on the argument copy. Parameters ---------- X : {array-like, sparse matrix} copy : bool, optional If True, a copy of X will be created. If False, a copy may still be returned if X's dtype is not a floating point type. force_all_finite : boolean or 'allow-nan', (default=True) Whether to raise an error on np.inf and np.nan in X. The possibilities are: - True: Force all values of X to be finite. - False: accept both np.inf and np.nan in X. - 'allow-nan': accept only np.nan values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. Returns ------- XT : {array, sparse matrix} An array of type np.float """ if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray) and not sp.issparse(X)): return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64, copy=copy, force_all_finite=force_all_finite, ensure_2d=False) elif sp.issparse(X) and X.dtype in [np.float32, np.float64]: return X.copy() if copy else X elif X.dtype in [np.float32, np.float64]: # is numpy array return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X else: if X.dtype.kind in 'uib' and X.dtype.itemsize <= 4: return_dtype = np.float32 else: return_dtype = np.float64 return X.astype(return_dtype) def _is_arraylike(x): """Returns whether the input is array-like""" return (hasattr(x, '__len__') or hasattr(x, 'shape') or hasattr(x, '__array__')) def _num_samples(x): """Return number of samples in array-like x.""" if hasattr(x, 'fit') and callable(x.fit): # Don't get num_samples from an ensembles length! 
raise TypeError('Expected sequence or array-like, got ' 'estimator %s' % x) if not hasattr(x, '__len__') and not hasattr(x, 'shape'): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError("Expected sequence or array-like, got %s" % type(x)) if hasattr(x, 'shape'): if len(x.shape) == 0: raise TypeError("Singleton array %r cannot be considered" " a valid collection." % x) return x.shape[0] else: return len(x) def _shape_repr(shape): """Return a platform independent representation of an array shape Under Python 2, the `long` type introduces an 'L' suffix when using the default %r format for tuples of integers (typically used to store the shape of an array). Under Windows 64 bit (and Python 2), the `long` type is used by default in numpy shapes even when the integer dimensions are well below 32 bit. The platform specific type causes string messages or doctests to change from one platform to another which is not desirable. Under Python 3, there is no more `long` type so the `L` suffix is never introduced in string representation. >>> _shape_repr((1, 2)) '(1, 2)' >>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2 >>> _shape_repr((one, 2 * one)) '(1, 2)' >>> _shape_repr((1,)) '(1,)' >>> _shape_repr(()) '()' """ if len(shape) == 0: return "()" joined = ", ".join("%d" % e for e in shape) if len(shape) == 1: # special notation for singleton tuples joined += ',' return "(%s)" % joined def check_memory(memory): """Check that ``memory`` is joblib.Memory-like. joblib.Memory-like means that ``memory`` can be converted into a sklearn.externals.joblib.Memory instance (typically a str denoting the ``cachedir``) or has the same interface (has a ``cache`` method). Parameters ---------- memory : None, str or object with the joblib.Memory interface Returns ------- memory : object with the joblib.Memory interface Raises ------ ValueError If ``memory`` is not joblib.Memory-like. """ if memory is None or isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) elif not hasattr(memory, 'cache'): raise ValueError("'memory' should be None, a string or have the same" " interface as sklearn.externals.joblib.Memory." " Got memory='{}' instead.".format(memory)) return memory def check_consistent_length(*arrays): """Check that all arrays have consistent first dimensions. Checks whether all objects in arrays have the same shape or length. Parameters ---------- *arrays : list or tuple of input objects. Objects that will be checked for consistent length. """ lengths = [_num_samples(X) for X in arrays if X is not None] uniques = np.unique(lengths) if len(uniques) > 1: raise ValueError("Found input variables with inconsistent numbers of" " samples: %r" % [int(l) for l in lengths]) def indexable(*iterables): """Make arrays indexable for cross-validation. Checks consistent length, passes through None, and ensures that everything can be indexed by converting sparse matrices to csr and converting non-interable objects to arrays. Parameters ---------- *iterables : lists, dataframes, arrays, sparse matrices List of objects to ensure sliceability. """ result = [] for X in iterables: if sp.issparse(X): result.append(X.tocsr()) elif hasattr(X, "__getitem__") or hasattr(X, "iloc"): result.append(X) elif X is None: result.append(X) else: result.append(np.array(X)) check_consistent_length(*result) return result def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy, force_all_finite): """Convert a sparse matrix to a given format. 
Checks the sparse format of spmatrix and converts if necessary. Parameters ---------- spmatrix : scipy sparse matrix Input to validate and convert. accept_sparse : string, boolean or list/tuple of strings String[s] representing allowed sparse matrix formats ('csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. dtype : string, type or None Data type of result. If None, the dtype of the input is preserved. copy : boolean Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean or 'allow-nan', (default=True) Whether to raise an error on np.inf and np.nan in X. The possibilities are: - True: Force all values of X to be finite. - False: accept both np.inf and np.nan in X. - 'allow-nan': accept only np.nan values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. Returns ------- spmatrix_converted : scipy sparse matrix. Matrix that is ensured to have an allowed type. """ if dtype is None: dtype = spmatrix.dtype changed_format = False if isinstance(accept_sparse, six.string_types): accept_sparse = [accept_sparse] if accept_sparse is False: raise TypeError('A sparse matrix was passed, but dense ' 'data is required. Use X.toarray() to ' 'convert to a dense numpy array.') elif isinstance(accept_sparse, (list, tuple)): if len(accept_sparse) == 0: raise ValueError("When providing 'accept_sparse' " "as a tuple or list, it must contain at " "least one string value.") # ensure correct sparse format if spmatrix.format not in accept_sparse: # create new with correct sparse spmatrix = spmatrix.asformat(accept_sparse[0]) changed_format = True elif accept_sparse is not True: # any other type raise ValueError("Parameter 'accept_sparse' should be a string, " "boolean or list of strings. You provided " "'accept_sparse={}'.".format(accept_sparse)) if dtype != spmatrix.dtype: # convert dtype spmatrix = spmatrix.astype(dtype) elif copy and not changed_format: # force copy spmatrix = spmatrix.copy() if force_all_finite: if not hasattr(spmatrix, "data"): warnings.warn("Can't check %s sparse matrix for nan or inf." % spmatrix.format) else: _assert_all_finite(spmatrix.data, allow_nan=force_all_finite == 'allow-nan') return spmatrix def _ensure_no_complex_data(array): if hasattr(array, 'dtype') and array.dtype is not None \ and hasattr(array.dtype, 'kind') and array.dtype.kind == "c": raise ValueError("Complex data not supported\n" "{}\n".format(array)) def check_array(array, accept_sparse=False, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, warn_on_dtype=False, estimator=None): """Input validation on an array, list, sparse matrix or similar. By default, the input is converted to an at least 2D numpy array. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : string, boolean or list/tuple of strings (default=False) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. 
False means that a sparse matrix input will raise an error. .. deprecated:: 0.19 Passing 'None' to parameter ``accept_sparse`` in methods is deprecated in version 0.19 "and will be removed in 0.21. Use ``accept_sparse=False`` instead. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. When order is None (default), then if copy=False, nothing is ensured about the memory layout of the output array; otherwise (copy=True) the memory layout of the returned array is kept as close as possible to the original array. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean or 'allow-nan', (default=True) Whether to raise an error on np.inf and np.nan in X. The possibilities are: - True: Force all values of X to be finite. - False: accept both np.inf and np.nan in X. - 'allow-nan': accept only np.nan values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. ensure_2d : boolean (default=True) Whether to raise a value error if X is not 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. ensure_min_samples : int (default=1) Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. """ # accept_sparse 'None' deprecation check if accept_sparse is None: warnings.warn( "Passing 'None' to parameter 'accept_sparse' in methods " "check_array and check_X_y is deprecated in version 0.19 " "and will be removed in 0.21. Use 'accept_sparse=False' " " instead.", DeprecationWarning) accept_sparse = False # store reference to original array to check if copy is needed when # function returns array_orig = array # store whether originally we wanted numeric dtype dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric" dtype_orig = getattr(array, "dtype", None) if not hasattr(dtype_orig, 'kind'): # not a data type (e.g. a column named dtype in a pandas DataFrame) dtype_orig = None if dtype_numeric: if dtype_orig is not None and dtype_orig.kind == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None if isinstance(dtype, (list, tuple)): if dtype_orig is not None and dtype_orig in dtype: # no dtype conversion required dtype = None else: # dtype conversion required. Let's select the first element of the # list of accepted types. 
dtype = dtype[0] if force_all_finite not in (True, False, 'allow-nan'): raise ValueError('force_all_finite should be a bool or "allow-nan"' '. Got {!r} instead'.format(force_all_finite)) if estimator is not None: if isinstance(estimator, six.string_types): estimator_name = estimator else: estimator_name = estimator.__class__.__name__ else: estimator_name = "Estimator" context = " by %s" % estimator_name if estimator is not None else "" if sp.issparse(array): _ensure_no_complex_data(array) array = _ensure_sparse_format(array, accept_sparse, dtype, copy, force_all_finite) else: # If np.array(..) gives ComplexWarning, then we convert the warning # to an error. This is needed because specifying a non complex # dtype to the function converts complex to real dtype, # thereby passing the test made in the lines following the scope # of warnings context manager. with warnings.catch_warnings(): try: warnings.simplefilter('error', ComplexWarning) array = np.asarray(array, dtype=dtype, order=order) except ComplexWarning: raise ValueError("Complex data not supported\n" "{}\n".format(array)) # It is possible that the np.array(..) gave no warning. This happens # when no dtype conversion happened, for example dtype = None. The # result is that np.array(..) produces an array of complex dtype # and we need to catch and raise exception for such cases. _ensure_no_complex_data(array) if ensure_2d: # If input is scalar raise error if array.ndim == 0: raise ValueError( "Expected 2D array, got scalar array instead:\narray={}.\n" "Reshape your data either using array.reshape(-1, 1) if " "your data has a single feature or array.reshape(1, -1) " "if it contains a single sample.".format(array)) # If input is 1D raise error if array.ndim == 1: raise ValueError( "Expected 2D array, got 1D array instead:\narray={}.\n" "Reshape your data either using array.reshape(-1, 1) if " "your data has a single feature or array.reshape(1, -1) " "if it contains a single sample.".format(array)) # in the future np.flexible dtypes will be handled like object dtypes if dtype_numeric and np.issubdtype(array.dtype, np.flexible): warnings.warn( "Beginning in version 0.22, arrays of strings will be " "interpreted as decimal numbers if parameter 'dtype' is " "'numeric'. It is recommended that you convert the array to " "type np.float64 before passing it to check_array.", FutureWarning) # make sure we actually converted to numeric: if dtype_numeric and array.dtype.kind == "O": array = array.astype(np.float64) if not allow_nd and array.ndim >= 3: raise ValueError("Found array with dim %d. %s expected <= 2." % (array.ndim, estimator_name)) if force_all_finite: _assert_all_finite(array, allow_nan=force_all_finite == 'allow-nan') shape_repr = _shape_repr(array.shape) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError("Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required%s." % (n_samples, shape_repr, ensure_min_samples, context)) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError("Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required%s." % (n_features, shape_repr, ensure_min_features, context)) if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig: msg = ("Data with input dtype %s was converted to %s%s." 
% (dtype_orig, array.dtype, context)) warnings.warn(msg, DataConversionWarning) if copy and np.may_share_memory(array, array_orig): array = np.array(array, dtype=dtype, order=order) return array def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, warn_on_dtype=False, estimator=None): """Input validation for standard estimators. Checks X and y for consistent length, enforces X 2d and y 1d. Standard input checks are only applied to y, such as checking that y does not have np.nan or np.inf targets. For multi-label y, set multi_output=True to allow 2d and sparse y. If the dtype of X is object, attempt converting to float, raising on failure. Parameters ---------- X : nd-array, list or sparse matrix Input data. y : nd-array, list or sparse matrix Labels. accept_sparse : string, boolean or list of string (default=False) String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. .. deprecated:: 0.19 Passing 'None' to parameter ``accept_sparse`` in methods is deprecated in version 0.19 "and will be removed in 0.21. Use ``accept_sparse=False`` instead. dtype : string, type, list of types or None (default="numeric") Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : 'F', 'C' or None (default=None) Whether an array will be forced to be fortran or c-style. copy : boolean (default=False) Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : boolean or 'allow-nan', (default=True) Whether to raise an error on np.inf and np.nan in X. This parameter does not influence whether y can have np.inf or np.nan values. The possibilities are: - True: Force all values of X to be finite. - False: accept both np.inf and np.nan in X. - 'allow-nan': accept only np.nan values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. ensure_2d : boolean (default=True) Whether to make X at least 2d. allow_nd : boolean (default=False) Whether to allow X.ndim > 2. multi_output : boolean (default=False) Whether to allow 2-d y (array or sparse matrix). If false, y will be validated as a vector. y cannot have np.nan or np.inf values if multi_output=True. ensure_min_samples : int (default=1) Make sure that X has a minimum number of samples in its first axis (rows for a 2D array). ensure_min_features : int (default=1) Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when X has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. y_numeric : boolean (default=False) Whether to ensure that y has a numeric type. If dtype of y is object, it is converted to float64. Should only be used for regression algorithms. warn_on_dtype : boolean (default=False) Raise DataConversionWarning if the dtype of the input data structure does not match the requested dtype, causing a memory copy. 
estimator : str or estimator instance (default=None) If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. y_converted : object The converted and validated y. """ X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator) if multi_output: y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False, dtype=None) else: y = column_or_1d(y, warn=True) _assert_all_finite(y) if y_numeric and y.dtype.kind == 'O': y = y.astype(np.float64) check_consistent_length(X, y) return X, y def column_or_1d(y, warn=False): """ Ravel column or 1d numpy array, else raises an error Parameters ---------- y : array-like warn : boolean, default False To control display of warnings. Returns ------- y : array """ shape = np.shape(y) if len(shape) == 1: return np.ravel(y) if len(shape) == 2 and shape[1] == 1: if warn: warnings.warn("A column-vector y was passed when a 1d array was" " expected. Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2) return np.ravel(y) raise ValueError("bad input shape {0}".format(shape)) def check_random_state(seed): """Turn seed into a np.random.RandomState instance Parameters ---------- seed : None | int | instance of RandomState If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. """ if seed is None or seed is np.random: return np.random.mtrand._rand if isinstance(seed, (numbers.Integral, np.integer)): return np.random.RandomState(seed) if isinstance(seed, np.random.RandomState): return seed raise ValueError('%r cannot be used to seed a numpy.random.RandomState' ' instance' % seed) def has_fit_parameter(estimator, parameter): """Checks whether the estimator's fit method supports the given parameter. Parameters ---------- estimator : object An estimator to inspect. parameter: str The searched parameter. Returns ------- is_parameter: bool Whether the parameter was found to be a named parameter of the estimator's fit method. Examples -------- >>> from sklearn.svm import SVC >>> has_fit_parameter(SVC(), "sample_weight") True """ return parameter in signature(estimator.fit).parameters def check_symmetric(array, tol=1E-10, raise_warning=True, raise_exception=False): """Make sure that array is 2D, square and symmetric. If the array is not symmetric, then a symmetrized version is returned. Optionally, a warning or exception is raised if the matrix is not symmetric. Parameters ---------- array : nd-array or sparse matrix Input object to check / convert. Must be two-dimensional and square, otherwise a ValueError will be raised. tol : float Absolute tolerance for equivalence of arrays. Default = 1E-10. raise_warning : boolean (default=True) If True then raise a warning if conversion is required. raise_exception : boolean (default=False) If True then raise an exception if array is not symmetric. Returns ------- array_sym : ndarray or sparse matrix Symmetrized version of the input array, i.e. the average of array and array.transpose(). If sparse, then duplicate entries are first summed and zeros are eliminated. """ if (array.ndim != 2) or (array.shape[0] != array.shape[1]): raise ValueError("array must be 2-dimensional and square. 
" "shape = {0}".format(array.shape)) if sp.issparse(array): diff = array - array.T # only csr, csc, and coo have `data` attribute if diff.format not in ['csr', 'csc', 'coo']: diff = diff.tocsr() symmetric = np.all(abs(diff.data) < tol) else: symmetric = np.allclose(array, array.T, atol=tol) if not symmetric: if raise_exception: raise ValueError("Array must be symmetric") if raise_warning: warnings.warn("Array is not symmetric, and will be converted " "to symmetric by average with its transpose.") if sp.issparse(array): conversion = 'to' + array.format array = getattr(0.5 * (array + array.T), conversion)() else: array = 0.5 * (array + array.T) return array def check_is_fitted(estimator, attributes, msg=None, all_or_any=all): """Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of "all_or_any" of the passed attributes and raises a NotFittedError with the given message. Parameters ---------- estimator : estimator instance. estimator instance for which the check is performed. attributes : attribute name(s) given as string or a list/tuple of strings Eg.: ``["coef_", "estimator_", ...], "coef_"`` msg : string The default error message is, "This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this method." For custom messages if "%(name)s" is present in the message string, it is substituted for the estimator name. Eg. : "Estimator, %(name)s, must be fitted before sparsifying". all_or_any : callable, {all, any}, default all Specify whether all or any of the given attributes must exist. Returns ------- None Raises ------ NotFittedError If the attributes are not found. """ if msg is None: msg = ("This %(name)s instance is not fitted yet. Call 'fit' with " "appropriate arguments before using this method.") if not hasattr(estimator, 'fit'): raise TypeError("%s is not an estimator instance." % (estimator)) if not isinstance(attributes, (list, tuple)): attributes = [attributes] if not all_or_any([hasattr(estimator, attr) for attr in attributes]): raise NotFittedError(msg % {'name': type(estimator).__name__}) def check_non_negative(X, whom): """ Check if there is any negative value in an array. Parameters ---------- X : array-like or sparse matrix Input data. whom : string Who passed X to this function. """ X = X.data if sp.issparse(X) else X if (X < 0).any(): raise ValueError("Negative values in data passed to %s" % whom)
bsd-3-clause
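# A minimal, illustrative estimator (the class itself is invented for this sketch) showing
# how the helpers defined above are typically combined: check_X_y at fit time,
# check_random_state for the random_state parameter, and check_is_fitted guarding predict.
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.validation import (check_X_y, check_array, check_is_fitted,
                                      check_random_state)

class MeanOffsetRegressor(BaseEstimator):
    def __init__(self, noise=0.0, random_state=None):
        self.noise = noise
        self.random_state = random_state

    def fit(self, X, y):
        X, y = check_X_y(X, y, y_numeric=True)           # validates shapes and finiteness
        rng = check_random_state(self.random_state)      # accepts None, int or RandomState
        self.offset_ = y.mean() + self.noise * rng.randn()
        return self

    def predict(self, X):
        check_is_fitted(self, 'offset_')                 # NotFittedError if fit was skipped
        X = check_array(X)
        return np.full(X.shape[0], self.offset_)

est = MeanOffsetRegressor(random_state=0).fit([[1.0], [2.0]], [1.0, 3.0])
print(est.predict([[10.0], [20.0]]))                     # [2. 2.]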
Barmaley-exe/scikit-learn
doc/datasets/mldata_fixture.py
367
1183
"""Fixture module to skip the datasets loading when offline Mock urllib2 access to mldata.org and create a temporary data folder. """ from os import makedirs from os.path import join import numpy as np import tempfile import shutil from sklearn import datasets from sklearn.utils.testing import install_mldata_mock from sklearn.utils.testing import uninstall_mldata_mock def globs(globs): # Create a temporary folder for the data fetcher global custom_data_home custom_data_home = tempfile.mkdtemp() makedirs(join(custom_data_home, 'mldata')) globs['custom_data_home'] = custom_data_home return globs def setup_module(): # setup mock urllib2 module to avoid downloading from mldata.org install_mldata_mock({ 'mnist-original': { 'data': np.empty((70000, 784)), 'label': np.repeat(np.arange(10, dtype='d'), 7000), }, 'iris': { 'data': np.empty((150, 4)), }, 'datasets-uci-iris': { 'double0': np.empty((150, 4)), 'class': np.empty((150,)), }, }) def teardown_module(): uninstall_mldata_mock() shutil.rmtree(custom_data_home)
bsd-3-clause
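# A hedged sketch of how a fixture like the one above is exercised.  It assumes the
# install_mldata_mock/uninstall_mldata_mock helpers imported in that module accept a dict
# of fake datasets and that fetch_mldata is then served from the mock instead of
# mldata.org; the 'mock' dataset name and its shapes are invented for this example.
import shutil
import tempfile
from os import makedirs
from os.path import join

import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.utils.testing import install_mldata_mock, uninstall_mldata_mock

data_home = tempfile.mkdtemp()
makedirs(join(data_home, 'mldata'))                      # same layout as the fixture above
install_mldata_mock({
    'mock': {'data': np.ones((5, 3)), 'label': np.arange(5, dtype='d')},
})
try:
    bunch = fetch_mldata('mock', data_home=data_home)    # no network access needed
    print(bunch.data.shape, bunch.target.shape)          # (5, 3) (5,)
finally:
    uninstall_mldata_mock()
    shutil.rmtree(data_home)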
tambetm/neon
neon/diagnostics/timing_plots.py
13
8617
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Logging and visalization for the data collected from backend timing decorators """ import numpy as np import logging import matplotlib matplotlib.use('Agg') # not for plotting but write to file. from matplotlib import pyplot as plt # with a middlefinger to pep8: # noqa matplotlib.rcParams['pdf.fonttype'] = 42 # TTF to be editable logger = logging.getLogger(__name__) def print_performance_stats(backend, logger): call_list = backend.flop_dict.keys() used_call_list = [] timed_calls = [] timed_times = [] total_time = 0 total_tflop = 0 for call in call_list: logger.info("Performed %s calls in %2.2fs:" + " %0.2fTFLOPS from %2.0fGFLOP", (str(len(backend.flop_dict[call]))+" "+call).ljust(15), sum(backend.time_dict[call]), sum(backend.flop_dict[call]) / sum(backend.time_dict[call]) / 1e12, sum(backend.flop_dict[call]) / 1e9) # Histogram of where the time is spent. tflop_array = np.array(backend.flop_dict[call]) / 1e12 time_array = np.array(backend.time_dict[call]) total_time += time_array.sum() total_tflop += tflop_array.sum() tflop_per_s = tflop_array / time_array # in GFLOP/s # plot only the biggest contributors if time_array.sum() > .001: used_call_list.append(call) timed_calls.append(tflop_per_s) timed_times.append(time_array) # gather data for plots paren_stash = get_parent_timing(used_call_list, backend) lfs, lts, soumith_stash = get_flops_times(used_call_list, backend) # plot the plots sufx = 'test' + backend.__module__ fname1 = 'figure1_'+sufx+'.pdf' fname2 = 'figure2_'+sufx+'.pdf' first_fig(paren_stash, used_call_list, timed_calls, timed_times, total_time, total_tflop, fname1) second_fig(lfs, lts, fname2) def get_parent_timing(used_call_list, backend): # compute timing per parent call: paren_stash = dict() for call in used_call_list: unique_paren_list = set(backend.paren_dic[call]) for paren in unique_paren_list: # add up times for "call" from "paren" time_stats = np.array([backend.time_dict[call][i] for i, x in enumerate(backend.paren_dic[call]) if x == paren]).sum() paren_stash[call + " from " + paren] = time_stats return paren_stash def get_flops_times(used_call_list, backend): # compute timing per layer call: layer_flops_stash = dict() layer_time_stash = dict() soumith_stash = dict() for call in used_call_list: unique_layer_list = set(backend.layer_dic[call]) for layer in unique_layer_list: # add up times for "call" from "paren" time_stats = np.array([backend.time_dict[call][i] for i, x in enumerate(backend.layer_dic[call]) if x == layer]).sum() soumith_be = np.array([backend.time_dict[call][i] for i, x in enumerate(backend.layer_dic[call]) if x == layer]).mean() flop_stats = np.array([backend.flop_dict[call][i] for i, x in enumerate(backend.layer_dic[call]) if x == layer]).sum() calllayer = call + " from " + layer layer_flops_stash[calllayer] = flop_stats / 
time_stats / 1e9 layer_time_stash[calllayer] = time_stats soumith_stash[calllayer] = 1000. * soumith_be return (layer_flops_stash, layer_time_stash, soumith_stash) def first_fig(paren_stash, used_call_list, timed_calls, timed_times, total_time, total_tflop, fname): """ First figure: a) one bar plot of time by function call and parent function. b) one histogram of Time spent vs. FLOPS achieved """ paren_col_stash = ['b' if 'fprop_fc' in k else 'g' if 'bprop_fc' in k else 'r' if 'update_fc' in k else 'c' if 'fprop_conv' in k else 'm' if 'bprop_conv' in k else 'y' if 'date_conv' in k else 'k' for k in paren_stash.keys()] plt.figure(1, figsize=(12, 6), dpi=120, facecolor='w', edgecolor='k') plt.subplots_adjust(left=0.2, right=0.9, top=0.9, bottom=0.1) plt.subplot(1, 2, 1) plt.barh(range(len(paren_stash)), paren_stash.values(), color=paren_col_stash, align='center', alpha=0.5) plt.yticks(range(len(paren_stash)), paren_stash.keys()) plt.title(r'Breakdown of MOP calls by parent') plt.xlabel('Time/s') # Second plot: speed vs. time times_col_stash = ['b' if 'fprop_fc' in k else 'g' if 'bprop_fc' in k else 'r' if 'update_fc' in k else 'c' if 'fprop_conv' in k else 'm' if 'bprop_conv' in k else 'y' if 'date_conv' in k else 'k' for k in used_call_list] plt.subplot(1, 2, 2) num_bins = 30 n, bins, patches = plt.hist(timed_calls, num_bins, weights=timed_times, range=(0, 7.5), color=times_col_stash, histtype='barstacked', normed=0, alpha=0.5) plt.title(r'Total %2.1fs %2.0fTF average %2.1fTFLOP/s' % (total_time, total_tflop, total_tflop/total_time)) plt.xlabel('TFLOPS') plt.xlim((0, 7.5)) plt.ylabel('Time (s)') plt.legend(used_call_list, prop={'size': 6}) plt.savefig(fname, dpi=500) # savefig overrides dpi value def second_fig(layer_flops_stash, layer_time_stash, fname): layer_col_stash = ['b' if 'conv1' in k else 'g' if 'conv2' in k else 'r' if 'fc' in k else 'c' if 'anon' in k else 'm' if 'output' in k else 'k' for k in layer_flops_stash.keys()] plt.figure(2, figsize=(12, 6), dpi=120, facecolor='w', edgecolor='k') plt.subplots_adjust(left=0.2, right=0.9, top=0.9, bottom=0.1) plt.subplot(1, 2, 1) plt.barh(range(len(layer_flops_stash)), layer_flops_stash.values(), color=layer_col_stash, align='center', alpha=0.5) plt.yticks(range(len(layer_flops_stash)), layer_flops_stash.keys()) plt.title(r'Breakdown of MOP calls by layer') # plt.xlim((0, 5.5)) plt.xlabel('TFLOPS') # second plot: time per call plt.subplot(1, 2, 2) plt.barh(range(len(layer_flops_stash)), layer_time_stash.values(), color=layer_col_stash, align='center', alpha=0.5) plt.yticks(range(len(layer_flops_stash)), range(len(layer_flops_stash))) plt.title(r'Breakdown of MOP calls by layer') # plt.xlim((0, 7)) plt.xlabel('Time (s)') plt.savefig(fname, dpi=500) def log_soumith_numbers(soumith_stash, layer_flops_stash, layer_time_stash): """ print out soumith benchmark numers """ logger.info("Soumith Benchmarks") sum_of_all_calls = 0 for i, key in enumerate(soumith_stash.keys()): logger.info("Performed %s in\t %2.2f ms per call with 10 calls" + "totaling to %2.2f GFLOPS, %2.2fGFLOP", key, soumith_stash[key], layer_flops_stash[key], layer_flops_stash[key]*layer_time_stash[key]) sum_of_all_calls += soumith_stash[key] logger.info("Total time in call %2.2f ms ", sum_of_all_calls)
apache-2.0
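The timing/visualization module above reports throughput per backend call type as total FLOP divided by total time (converted to TFLOP/s). Below is a minimal, self-contained sketch of that bookkeeping; the flop_dict/time_dict contents are invented purely for illustration, but the layout (lists of per-call FLOP counts and seconds) mirrors what the module reads from the backend decorators.

import numpy as np

# Per call type: throughput = total FLOP performed / total seconds spent.
flop_dict = {"fprop_fc": [2.0e9, 2.1e9, 1.9e9], "bprop_fc": [4.0e9, 4.2e9]}
time_dict = {"fprop_fc": [1.1e-3, 1.0e-3, 1.2e-3], "bprop_fc": [2.3e-3, 2.2e-3]}

for call in flop_dict:
    total_tflop = np.sum(flop_dict[call]) / 1e12   # TFLOP performed by this call type
    total_time = np.sum(time_dict[call])           # seconds spent in this call type
    print("%s: %.3f TFLOP/s over %.4fs" % (call, total_tflop / total_time, total_time))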
RohitMetaCube/test_code
generate_career_paths_3_test_with_global_graph2.py
1
44251
''' THIS IS FINAL VERSION FOR 9 JANUARY 2017 (VERIFIED FOR "Economics" and "Biology") 1. Now we have local edges with global edges for any major nodes 2. we multiply local edge weight with a factor to be in a comparison of global edges ''' import networkx as nx from db_utils import DBUtils import csv import time import re import numpy as np from configurator import configurator import utils from filter_chain import remove_stop_words from sklearn.externals import joblib from functools import partial from multiprocessing import Pool import math dbutils = DBUtils(db_name='zippia', host='master.mongodb.d.int.zippia.com') db_local = DBUtils(db_name='test', host='localhost') db_zippia2 = DBUtils(db_name='zippia2', host='localhost') def format_date(date_string): date_string = re.sub("\W+", " ", date_string.lower()) date_string = re.sub("\d+[ ]*((year)|(yr))([s]{0,1})", " ", date_string) date_string = re.sub("\d+[ ]*((month)|(mt))([s]{0,1})", " ", date_string) date_string = re.sub(" +", " ", date_string).strip() return date_string class valid_edge_checker: level_dict = { "Lead": set([ "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Leader": set([ "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Senior": set([ "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Principal": set([ "Lead", "Head", "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Chief": set([ "Lead", "Head", "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Head": set([ "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Deputy": set([ "Lead", "Head", "Senior", "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ]), "Adjunct": set(["Trainee", "Fellow", "Associate", "Assistant"]), "Associate": set(["Trainee", "Fellow", "Assistant"]), "Assistant": set(["Trainee", "Fellow"]), "Junior": set(["Trainee", "Fellow"]) } level_words = { "Volunteer": -5, "Intern": -5, "Internship": -5, "Extern": -5, "Externship": -5, "Junior": -4, "Assistant": -3, "Associate": -2, "Adjunct": -1, "Vice": -1, "Senior": 1, "Staff": 2, "Principal": 3, "Supervisor": 4, "Lead": 4, "Leader": 4, "Head": 4, "Architect": 5, "Manager": 6, "Dean": 7, "Director": 8, "Executive": 8, 'Administrator': 8, "President": 9, "Chancellor": 10, "Chief": 10, } @staticmethod def is_valid_edge(node1, node2): node1_tokens = remove_stop_words.apply(re.sub("\W+", " ", node1)).split() node2_tokens = remove_stop_words.apply(re.sub("\W+", " ", node2)).split() node2_ngrams = utils.find_ngrams(node2_tokens, 2, separator=" ") node2_tokens = set(node2_tokens) node2_ngrams.update(node2_tokens) node1_ngrams = utils.find_ngrams(node1_tokens, 2, separator=" ") node1_tokens = set(node1_tokens) node1_ngrams.update(node1_tokens) diff1 = list(node1_tokens.difference(node2_tokens)) diff2 = list(node2_tokens.difference(node1_tokens)) valid_flag = True if (((len(node1_tokens) - len(node2_tokens)) == 1 and len(node1_tokens.intersection(node2_tokens)) == len(node2_tokens) and diff1[0] in set([ "Lead", "Leader", "Senior", "Principal", "Chief", "Head", "Deputy" ])) or ((len(node2_tokens) - len(node1_tokens)) == 1 and len(node1_tokens.intersection(node2_tokens)) == len(node1_tokens) and diff2[0] in set([ "Vice", "Junior", "Trainee", "Fellow", "Associate", "Adjunct", "Assistant" ])) or (node2_ngrams.intersection([ "Internship", "Externship", "Volunteer", "Intern", 
"Extern", "Student", "Customer Service", "Cashier", "Server", "Bartender", 'Insurance Agent', "Research Assistant", "Teaching Assistant", "Graduate Assistant", "Administrative Assistant", "Receptionist" ]))): valid_flag = False elif len(diff1) == 1 and len(diff2) == 1 and ( diff1[0] in valid_edge_checker.level_dict and diff2[0] in valid_edge_checker.level_dict[diff1[0]]): valid_flag = False elif (node1_tokens.intersection([ "Manager", "Director", "Engineer", "Principal", "President", "Scientist", "Chief", "Chancellor", "Deputy" ]) and node2 in set([ "Sales", "Sales Representative", "Sales Associate", "Server", "Coordinator", "Pharmacist Technician" ])): valid_flag = False elif node2 in set([ "Sales", "Sales Representative", "Sales Associate", "Server", "Coordinator" ]): valid_flag = False else: levels1 = [ valid_edge_checker.level_words[t1] for t1 in diff1 if t1 in valid_edge_checker.level_words ] levels2 = [ valid_edge_checker.level_words[t2] for t2 in diff2 if t2 in valid_edge_checker.level_words ] level1 = max(levels1) if levels1 else 0 level2 = max(levels2) if levels2 else 0 if node1_tokens.intersection(node2_tokens) and level1 > level2: valid_flag = False return valid_flag def remove_edges(edges, start_titles, end_titles, edge_count_threshold=3): print "Removing reverse edges..." edge_count = 0 valid_edges = {} for edge in edges: reverse_edge = (edge[1], edge[0]) if valid_edge_checker.is_valid_edge( edge[0], edge[1]) and edges[edge]['count'] >= edge_count_threshold: if reverse_edge in edges: if (not valid_edge_checker.is_valid_edge(edge[1], edge[0]) ) or (edges[edge]['count'] >= edges[reverse_edge]['count'] and reverse_edge not in valid_edges): valid_edges[edge] = edges[edge]['count'] else: valid_edges[edge] = edges[edge]['count'] if edge[0] in start_titles and edge[ 1] in end_titles and edge in valid_edges: valid_edges[edge] = valid_edges[edge] / 10 if valid_edges[ edge] > 10 else 0.1 edge_count += 1 if edge_count % 100000 == 0: print "{} edges processed.".format(edge_count) print "Removed reverse edges..." 
return valid_edges def read_global_frequent_transitions(): edges = {} f = open('frequent_transitions_all_edges.csv', 'rb') fr = csv.reader(f, delimiter='\t') for row in fr: from_title = row[0] to_title = row[1] freq = int(row[2]) edge = (from_title, to_title) edges[edge] = freq f.close() return edges global_edges = read_global_frequent_transitions() def manage_edges(edge_dict, resume_count, depriotize_starts): if resume_count <= 3000: threshold = 5 elif resume_count <= 20000: threshold = 50 else: threshold = 100 new_degree_edges = {} all_nodes = set() for edge in edge_dict: all_nodes.add(edge[0]) all_nodes.add(edge[1]) all_nodes = list(all_nodes) for i, n1 in enumerate(all_nodes): for n2 in all_nodes[i + 1:]: node1 = n1 node2 = n2 if (((n1, n2) not in global_edges and (n2, n1) in global_edges) or ((n1, n2) in global_edges and (n2, n1) in global_edges and global_edges[(n2, n1)] > global_edges[(n1, n2)])): node1 = n2 node2 = n1 if (((node1, node2) in global_edges and global_edges[(node1, node2)] >= threshold) or ((node1, node2) in edge_dict and (edge_dict[(node1, node2)]['count'] * 10) >= threshold)): new_degree_edges[(node1, node2)] = {} new_degree_edges[(node1, node2)]['count'] = global_edges[( node1, node2)] if (node1, node2) in global_edges else 0 if (node1, node2) in edge_dict: new_degree_edges[(node1, node2)]['count'] += edge_dict[( node1, node2)]['count'] * 10 if node1 in depriotize_starts or set( re.sub("\W+", " ", node1).split()).intersection( ["Sales"]): new_degree_edges[(node1, node2)]['count'] = float( new_degree_edges[(node1, node2)]['count']) / 10 new_degree_edges[(node1, node2)]['time_intervals'] = [] if (node1, node2) in edge_dict: new_degree_edges[(node1, node2)]['time_intervals'].extend( edge_dict[(node1, node2)]['time_intervals']) elif (node2, node1) in edge_dict: new_degree_edges[(node1, node2)]['time_intervals'].extend( edge_dict[(node2, node1)]['time_intervals']) return new_degree_edges def create_graph_for_majors(major, degrees, work_meta_info, min_conf, depriotize_starts): dbutils = DBUtils(db_name='zippia', host='master.mongodb.d.int.zippia.com') edges = {} nodes = set() resume_count = 0 resumes = dbutils.fetch_data( 'resume', "cursor", {'latest_ed_major': major, 'latest_ed_degree': { '$in': list(degrees) }}) for resume in resumes: ''' Look at the last education info ''' resume_count += 1 create_edges_from_resume(resume, work_meta_info, min_conf, edges, nodes, resume["latest_ed_major"]) if resume_count % 10000 == 0: print "Processed {} resumes".format(resume_count) print "Statistics for major:", major edges = manage_edges(edges, resume_count, depriotize_starts) print "The graph has: {} edges and {} nodes".format(len(edges), len(nodes)) return edges def create_graph(edges, sts, ets, edge_count_threshold): career_graph = nx.DiGraph() valid_edges = remove_edges( edges, sts, ets, edge_count_threshold=edge_count_threshold) print "length of valid edges", len(valid_edges) edge_list = [] for edge in valid_edges: weight = 1.0 / (1.0 * valid_edges[edge]) edge_list.append((edge[0], edge[1], weight)) career_graph.add_weighted_edges_from(edge_list) return career_graph def create_edges_from_resume(resume, work_meta_info, min_conf, edges, nodes, major): is_valid_date_flag = True [d, m, y] = [ int(x) for x in extract_from_date(resume["latest_ed_from"], resume[ "latest_ed_to"]).split(',') ] if y == 100000 or not y: is_valid_date_flag = False if is_valid_date_flag: valid_index = False path = [] title_time_map = {} for index in reversed(range(1, 15)): current_work_ex_meta_info = 
work_meta_info[index] next_work_ex_meta_info = work_meta_info[index - 1] if (current_work_ex_meta_info["title"] in resume and resume[current_work_ex_meta_info["title"]] and current_work_ex_meta_info["match"] in resume and resume[current_work_ex_meta_info["match"]] and current_work_ex_meta_info["confidence"] in resume and resume[current_work_ex_meta_info["confidence"]] >= min_conf and next_work_ex_meta_info["title"] in resume and resume[next_work_ex_meta_info["title"]] and next_work_ex_meta_info["match"] in resume and resume[next_work_ex_meta_info["match"]] and next_work_ex_meta_info["confidence"] in resume and resume[next_work_ex_meta_info["confidence"]] >= min_conf and resume[current_work_ex_meta_info["title"]] != resume[next_work_ex_meta_info["title"]]): ''' There is a valid edge present from the work experience at index to the one at (index-1) ''' current_date = extract_from_date( resume[current_work_ex_meta_info['from']], resume[current_work_ex_meta_info['to']]) [cd, cm, cy] = [int(x) for x in current_date.split(',')] if valid_index or (d + m * 30 + y * 365) <= ( cd + cm * 30 + cy * 365): from_title = resume[current_work_ex_meta_info["title"]] to_title = resume[next_work_ex_meta_info["title"]] if not valid_index: valid_index = True path.append(from_title) title_time_map[from_title] = current_date path.append(to_title) edge = (from_title, to_title) if edge not in edges: edges[edge] = {} edges[edge]['count'] = 0 edges[edge]['time_intervals'] = [] if from_title not in nodes: nodes.add(from_title) if to_title not in nodes: nodes.add(to_title) edges[edge]['count'] += 1 next_date = extract_from_date( resume[next_work_ex_meta_info['from']], resume[next_work_ex_meta_info['to']]) title_time_map[to_title] = next_date [issue_flag, date_diff] = utils.date_difference( current_date, next_date) if not issue_flag: edges[edge]['time_intervals'].append(date_diff) for i, node1 in enumerate(path): current_date = title_time_map[ node1] if node1 in title_time_map else '' for j, node2 in enumerate(path[i + 2:]): edge = (node1, node2) if edge not in edges: edges[edge] = {} edges[edge]['count'] = 0 edges[edge]['time_intervals'] = [] edges[edge]['count'] += 1 / (j + 2) next_date = title_time_map[ node2] if node2 in title_time_map else '' if current_date and next_date: [issue_flag, date_diff] = utils.date_difference( current_date, next_date) if not issue_flag: edges[edge]['time_intervals'].append(date_diff) def extract_from_date(from_date_string, to_date_string): current_date = format_date(from_date_string) d, m, y = utils.formatted_date(current_date) if y == 100000: current_date = format_date(to_date_string) d, m, y = utils.formatted_date(current_date) elif not y: [issue_flag, date_diff] = utils.date_difference(from_date_string, to_date_string) if not issue_flag: last_date = format_date(to_date_string) ld, lm, ly = utils.formatted_date(last_date) if ly and ly != 100000: to_date = ld + lm * 30 + ly * 365 - date_diff d, m, y = int((to_date % 365) % 12), int( (to_date % 365) / 12), int(to_date / 365) return "{},{},{}".format(d, m, y) def read_start_titles_and_end_titles(top_k_start=15, top_k_end=15, depriotize_starts=set()): major_title_dict = {} f = open("Start_Titles_15_dec_2016.csv", 'rb') fr = csv.reader(f, delimiter="\t") for row in fr: major = row[0] degree = row[1] norm_degree_value = degree if major not in major_title_dict: major_title_dict[major] = {} major_title_dict[major]['start_titles'] = {} major_title_dict[major]['end_titles'] = {} if norm_degree_value not in major_title_dict[major]['start_titles']: 
major_title_dict[major]['start_titles'][norm_degree_value] = [] major_title_dict[major]['end_titles'][norm_degree_value] = [] major_title_dict[major]['start_titles'][norm_degree_value] = [] s_count = 0 depriortize_set = [] for title_tuple in row[3:]: s_t = re.sub(",\d+\)", "", title_tuple[1:]) if major == "Computer Science" and s_t == "Network Administrator": depriortize_set.append(s_t) elif (s_t not in depriotize_starts and not set( ['Sales']).intersection(re.sub("\W+", " ", s_t).split())): major_title_dict[major]['start_titles'][ norm_degree_value].append(s_t) s_count += 1 else: depriortize_set.append(s_t) if s_count >= top_k_start: break if s_count < top_k_start: for s_t in depriortize_set: major_title_dict[major]['start_titles'][ norm_degree_value].append(s_t) f.close() f = open("End_Titles_with_exp_and_W1_both.csv", 'rb') fr = csv.reader(f, delimiter="\t") for row in fr: major = row[0] degree = row[1] norm_degree_value = degree if major not in major_title_dict: major_title_dict[major] = {} major_title_dict[major]['start_titles'] = {} major_title_dict[major]['end_titles'] = {} if norm_degree_value not in major_title_dict[major]['end_titles']: major_title_dict[major]['start_titles'][norm_degree_value] = [] major_title_dict[major]['end_titles'][norm_degree_value] = [] s_count = 0 depriortize_set = [] for title_tuple in row[3:]: s_t = re.sub(",\d+\)", "", title_tuple[1:]) if major == "Biology" and s_t in set([ "Quality Engineer", "Quality Manager", "Quality Assurance Manager", "Production Manager" ]): depriortize_set.append(s_t) else: major_title_dict[major]['end_titles'][ norm_degree_value].append(s_t) s_count += 1 if s_count >= top_k_end: break if s_count < top_k_end: for s_t in depriortize_set: major_title_dict[major]['end_titles'][ norm_degree_value].append(s_t) f.close() for major, degree_dict in major_title_dict.items(): for degree, titles in degree_dict['end_titles'].items(): if not titles or not degree_dict['start_titles'][degree]: del degree_dict['start_titles'][degree] del degree_dict['end_titles'][degree] if not degree_dict['start_titles'] or not degree_dict['end_titles']: del major_title_dict[major] return major_title_dict def similarity_computer(array1, array2): intersection_count = len(set(array1).intersection(array2)) return float((1 + intersection_count)) / len(array1) def select_best_nodes(path, edge_weights, is_local_edge_dict=True): if len(path) > 4: start_node = path[0] end_node = path[-1] best_path = [start_node, end_node] best_path_weight = 0 min_freq = 0 for i, node1 in enumerate(path[1:-1]): for node2 in path[i + 1:-1]: l = len(set([start_node, node1, node2, end_node])) if is_local_edge_dict: edge1_weight = edge_weights[( start_node, node1)]['count'] if ( start_node, node1) in edge_weights else 0 edge2_weight = edge_weights[(node1, node2)]['count'] if ( node1, node2) in edge_weights else 0 edge3_weight = edge_weights[( node2, end_node)]['count'] if (node2, end_node ) in edge_weights else 0 else: edge1_weight = edge_weights[(start_node, node1)] if ( start_node, node1) in edge_weights else 0 edge2_weight = edge_weights[( node1, node2)] if (node1, node2) in edge_weights else 0 edge3_weight = edge_weights[(node2, end_node)] if ( node2, end_node) in edge_weights else 0 if l == 4: if (min([edge1_weight, edge2_weight, edge3_weight]) > min_freq or (min([edge1_weight, edge2_weight, edge3_weight]) == min_freq and sum([edge1_weight, edge2_weight, edge3_weight]) >= best_path_weight)): best_path_weight = sum( [edge1_weight, edge2_weight, edge3_weight]) min_freq = min( 
[edge1_weight, edge2_weight, edge3_weight]) best_path = [start_node, node1, node2, end_node] elif l == 3 and node1 == node2 and ( min([edge1_weight, edge3_weight]) > min_freq and sum([edge1_weight, edge3_weight]) >= best_path_weight): best_path_weight = sum([edge1_weight, edge3_weight]) best_path = [start_node, node1, end_node] else: best_path = path return best_path def support_calculation(top_k_start, top_k_end, end_titles, start_titles, degree_graph, major, degree, edges, title_time_dict): selected_paths = [] rejected_paths = [] len_frac = {4: 1, 3: 0.9, 2: 0.8} for end_index, et in enumerate(end_titles[:top_k_end]): all_paths = [] for st in start_titles[:top_k_start]: if st != et: try: new_paths = nx.all_simple_paths( degree_graph, st, et, cutoff=3) c = 0 for new_path in new_paths: c += 1 l = len(new_path) if l > 1: if l > 4: l = 4 new_path2 = select_best_nodes( new_path, edges, is_local_edge_dict=True) all_paths.append([new_path2, l]) for i, node1 in enumerate(new_path2[:-1]): node2 = new_path2[i + 1] if (node1, node2) not in edges or not edges[( node1, node2)]['time_intervals']: overall_median_time = 0 start_index = new_path.index(node1) end_index = new_path.index(node2) for j, node3 in enumerate(new_path[ start_index:end_index]): node4 = new_path[start_index + j + 1] if (node3, node4) not in edges: median_time = abs( title_time_dict[node4] - title_time_dict[node3]) if ( node3 in title_time_dict and node4 in title_time_dict) else '' if not median_time: overall_median_time = "" break edges[(node3, node4)] = {} edges[(node3, node4)]['count'] = 1 edges[(node3, node4 )]['time_intervals'] = [ median_time ] elif not edges[(node3, node4 )]['time_intervals']: median_time = abs( title_time_dict[node4] - title_time_dict[node3]) if ( node3 in title_time_dict and node4 in title_time_dict) else '' if not median_time: overall_median_time = "" break edges[(node3, node4 )]['time_intervals'] = [ median_time ] overall_median_time += np.median(edges[ (node3, node4)]['time_intervals']) if overall_median_time: if (node1, node2) not in edges: edges[(node1, node2)] = {} edges[(node1, node2)]['count'] = 1 edges[(node1, node2)]['time_intervals'] = [ overall_median_time ] except: pass for i, path in enumerate(all_paths): similarities = [] for selected_path in selected_paths: similarity = similarity_computer(path[0], selected_path) similarities.append(similarity) if similarities: similarity = 1 / max(similarities) else: similarity = 4 all_paths[i].append(similarity) all_weights = [(edges[(path[0][j - 1], node)]['count'] if (path[0][j - 1], node) in edges else 0) for j, node in enumerate(path[0]) if j] all_weights = sorted(all_weights) all_paths[i].append(all_weights[0] * len_frac[path[1]] * similarity) if len(all_weights) > 1: all_paths[i].append(all_weights[1] * len_frac[path[1]] * similarity) else: all_paths[i].append(0) if len(all_weights) > 2: all_paths[i].append(all_weights[2] * len_frac[path[1]] * similarity) else: all_paths[i].append(0) del all_weights try: all_paths = sorted( all_paths, key=lambda k: (k[3], k[4], k[5]), reverse=True) best_path = all_paths[0][0] selected_paths.append(best_path) except: pass if len(selected_paths) == 15: break sl = len(selected_paths) if sl < 15: rejected_paths = sorted( rejected_paths, key=lambda k: (k[3], k[4], k[5]), reverse=True) for path_tuple in rejected_paths[:(15 - sl)]: selected_paths.append(path_tuple[0]) print "major: {}, degree: {}, clear_selected_paths: {}, after_adding_from rejected: {}".format( major, degree, sl, len(selected_paths)) for p in 
selected_paths: print p return selected_paths def print_paths_iterator( major, titles_dict, title_skills, required_skills_threshold, career_graph, edges, title_time_dict, iterate_on_degrees=["Graduate", "Under Graduate", "Other"]): result_paths = {} result_paths['graduate_paths'] = [] result_paths['under_graduate_paths'] = [] result_paths['other_paths'] = [] for degree, end_titles in titles_dict["end_titles"].items(): start_titles = titles_dict["start_titles"][degree] selected_paths = support_calculation(30, 50, end_titles, start_titles, career_graph, major, degree, edges, title_time_dict) path_index = 1 k = "_".join(degree.lower().split()) + "_paths" for selected_path in selected_paths: new_path = [] best_path = selected_path #select_best_nodes(selected_path, edges) for node_index, title in enumerate(best_path[:-1]): edge = (title, best_path[node_index + 1]) current_skills = set(title_skills[title][ 'skill_set']) if title in title_skills else set([]) next_skills = title_skills[best_path[node_index + 1]][ 'skill_set'] if best_path[node_index + 1] in title_skills else [] new_obj = {} new_obj['title'] = title new_obj['socCode'] = title_skills[title][ 'soc_code'] if title in title_skills else "" if edge in edges: new_obj['medianYrs'] = math.ceil( np.median(edges[edge]['time_intervals'])) if edges[ edge]['time_intervals'] else "" if not new_obj['medianYrs']: new_obj['medianYrs'] = math.ceil( abs(title_time_dict[edge[1]] - title_time_dict[edge[ 0]])) if (edge[0] in title_time_dict and edge[1] in title_time_dict) else "" new_obj['skills'] = [] new_skills_count = 0 for skill in next_skills: if skill not in current_skills: new_obj['skills'].append(skill) new_skills_count += 1 if new_skills_count >= required_skills_threshold: break new_path.append(new_obj) new_obj = {} new_obj['title'] = best_path[-1] new_obj['socCode'] = title_skills[title][ 'soc_code'] if title in title_skills else "" new_path.append(new_obj) if len(new_path) > 1: result_paths[k].append(new_path) path_index += 1 return [result_paths, edges] def append_new_paths(result_paths, edge_weights, final_valid_paths, path_name='graduate_paths'): len_frac = {4: 1, 3: 0.9, 2: 0.8} all_paths = [] path_obj = {} for path in result_paths[path_name]: grad_path = [node_obj['title'] for node_obj in path] all_paths.append(grad_path) key = "_".join(grad_path) path_obj[key] = path selected_paths = [] selected_keys = set([]) for path in final_valid_paths[path_name]: grad_path = [node_obj['title'] for node_obj in path] selected_paths.append(grad_path) selected_keys.add("_".join(grad_path)) all_paths = [[path, len(path)] for path in all_paths if "_".join(path) not in selected_keys] for i, path in enumerate(all_paths): similarities = [] for selected_path in selected_paths: similarity = similarity_computer(path[0], selected_path) similarities.append(similarity) if similarities: similarity = 1 / max(similarities) else: similarity = 4 all_paths[i].append(similarity) all_weights = [ edge_weights[(path[0][j - 1], node)]['count'] if j and (path[0][j - 1], node) in edge_weights else 0 for j, node in enumerate(path[0]) ] all_weights = sorted(all_weights) all_paths[i].append(all_weights[0] * len_frac[path[1]] * similarity) if len(all_weights) > 1: all_paths[i].append(all_weights[1] * len_frac[path[1]] * similarity) else: all_paths[i].append(0) if len(all_weights) > 2: all_paths[i].append(all_weights[2] * len_frac[path[1]] * similarity) else: all_paths[i].append(0) del all_weights try: all_paths = sorted( all_paths, key=lambda k: (k[3], k[4], k[5]), reverse=True) 
for best_path in all_paths: if best_path[2] >= 1: selected_paths.append(best_path[0]) key = "_".join(best_path[0]) final_valid_paths[path_name].append(path_obj[key]) if len(selected_paths) == 15: break except: pass def combine_start_and_end_titles(titles_dict): sts = set() ets = set() for degree, end_titles in titles_dict["end_titles"].items(): start_titles = titles_dict["start_titles"][degree] sts.update(start_titles[:30]) ets.update(end_titles[:50]) return [sts, ets] def print_paths(depriotize_starts, major_title_dict1, major_title_dict2, degrees, work_meta_info, min_conf, required_skills_threshold, top_k, edge_count_threshold, index_val): collection_name = "careerPathsForMajors_test_time" db_local = DBUtils(db_name='test', host='localhost') title_skills = read_skill_master() title_time_dict = read_local_skill_master() if index_val == 1: major_title_dict = major_title_dict1 else: major_title_dict = major_title_dict2 for major, titles_dict in major_title_dict.items(): start_time = time.time() final_valid_paths = {} final_valid_paths['name'] = major final_valid_paths['version'] = 4 final_valid_paths['graduate_paths'] = [] final_valid_paths['under_graduate_paths'] = [] final_valid_paths['other_paths'] = [] iterate_over_degrees = ["Graduate", "Under Graduate", "Other"] major_edge_count_threshold = 0.1 #edge_count_threshold [sts, ets] = combine_start_and_end_titles(titles_dict) edges = create_graph_for_majors(major, degrees, work_meta_info, min_conf, depriotize_starts) joblib.dump( edges, '/mnt/data/rohit/major_edge_count/' + "_".join(re.sub("\W+", " ", major).split()) + '_edge_counts.pkl') while major_edge_count_threshold > 0 and iterate_over_degrees: career_graph = create_graph(edges, sts, ets, major_edge_count_threshold) [result_paths, edges] = print_paths_iterator( major, titles_dict, title_skills, required_skills_threshold, career_graph, edges, title_time_dict, iterate_over_degrees) if len(result_paths['graduate_paths'] ) >= 15 and not final_valid_paths['graduate_paths']: final_valid_paths['graduate_paths'].extend(result_paths[ 'graduate_paths']) iterate_over_degrees.remove("Graduate") elif result_paths['graduate_paths'] and len(final_valid_paths[ 'graduate_paths']) < 15: append_new_paths( result_paths, edges, final_valid_paths, path_name='graduate_paths') if len(final_valid_paths['graduate_paths']) >= 15: iterate_over_degrees.remove("Graduate") if len(result_paths['under_graduate_paths'] ) >= 15 and not final_valid_paths['under_graduate_paths']: final_valid_paths['under_graduate_paths'].extend(result_paths[ 'under_graduate_paths']) iterate_over_degrees.remove("Under Graduate") elif result_paths['under_graduate_paths'] and len( final_valid_paths['under_graduate_paths']) < 15: append_new_paths( result_paths, edges, final_valid_paths, path_name='under_graduate_paths') if len(final_valid_paths['under_graduate_paths']) >= 15: iterate_over_degrees.remove("Under Graduate") if len(result_paths['other_paths'] ) >= 15 and not final_valid_paths['other_paths']: final_valid_paths['other_paths'].extend(result_paths[ 'other_paths']) iterate_over_degrees.remove("Other") elif result_paths['other_paths'] and len(final_valid_paths[ 'other_paths']) < 15: append_new_paths( result_paths, edges, final_valid_paths, path_name='other_paths') if len(final_valid_paths['other_paths']) >= 15: iterate_over_degrees.remove("Other") major_edge_count_threshold -= 1 db_local.insert_records(collection_name, final_valid_paths) print "Done Major: {} in {}s".format(major, time.time() - start_time) def read_skill_master(): 
dbutils = DBUtils(db_name='zippia', host='master.mongodb.d.int.zippia.com') skill_master_dict = {} cursor = dbutils.fetch_data( configurator.commons.SKILL_MASTER, 'cursor', {}, {'lay_title': 1, 'most_popular_soc_codes': 1, 'skill_set': 1}) for elem in cursor: skill_master_dict[elem['lay_title']] = {} skill_master_dict[elem['lay_title']]['soc_code'] = elem[ 'most_popular_soc_codes'][0] skill_master_dict[elem['lay_title']][ 'skill_set'] = [skill[0] for skill in elem['skill_set'][:30]] return skill_master_dict def read_local_skill_master(): db_zippia2 = DBUtils(db_name='zippia2', host='localhost') skill_master_dict = {} cursor = db_zippia2.fetch_data(configurator.commons.SKILL_MASTER, 'cursor', {}, {'lay_title': 1, 'median_time_to_reach': 1}) for elem in cursor: if "median_time_to_reach" in elem: skill_master_dict[elem['lay_title']] = {} skill_master_dict[elem['lay_title']] = elem['median_time_to_reach'] return skill_master_dict def fetch_coverd_majors(): collection_name = "careerPathsForMajors5" cursor = db_local.fetch_data(collection_name, 'cursor', {}, {'name': 1}) return set([elem['name'] for elem in cursor]) def path_main_function(): depriotize_starts = set([ "Assistant Internship", "Externship", "Volunteer", "Tutor", "Administrative Assistant", "Cashier", "Customer Service", "Customer Service Representative", "Server", "Bartender", "Waiter", "Waitress", "Instructor" ]) work_start_index = 1 work_end_index = 15 work_prefix = "W" work_meta_info = [] min_conf = 0 degrees = set([ "Bachelors", "Masters", "Doctorate", "Certificate", "Associate", "License", "Diploma", "Other" ]) for index in range(work_start_index, work_end_index + 1): index_str = str(index) work_meta_info_obj = {} work_meta_info_obj["title"] = "closest_lay_title_" + index_str work_meta_info_obj["confidence"] = "max_confidence_" + index_str work_meta_info_obj["match"] = "is_match_" + index_str work_meta_info_obj["from"] = work_prefix + index_str + "Duration From" work_meta_info_obj["to"] = work_prefix + index_str + "Duration To" work_meta_info.append(work_meta_info_obj) major_title_dict = read_start_titles_and_end_titles( top_k_start=30, top_k_end=50, depriotize_starts=depriotize_starts) print "major title dict created successfully" test_majot_title_dict = {} for major in [ "Biomathematics, Bioinformatics, and Computational Biology", "Ecology, Evolution, Systematics, and Population Biology" ]: #"Economics", "Political Science and Government", "Biology", "Chemistry", "Computer Science", "Statistics", "Physics", test_majot_title_dict[major] = major_title_dict[ major] if major in major_title_dict else {} major_title_dict = test_majot_title_dict print_paths(depriotize_starts, major_title_dict, {}, degrees, work_meta_info, min_conf, 5, 30, 3, 1) print "Done" def path_main_function_parallel(): depriotize_starts = set([ "Assistant Internship", "Externship", "Volunteer", "Tutor", "Administrative Assistant", "Cashier", "Customer Service", "Customer Service Representative", "Server", "Bartender", "Waiter", "Waitress", "Instructor" ]) work_start_index = 1 work_end_index = 15 work_prefix = "W" work_meta_info = [] min_conf = 0 degrees = set([ "Bachelors", "Masters", "Doctorate", "Certificate", "Associate", "License", "Diploma", "Other" ]) for index in range(work_start_index, work_end_index + 1): index_str = str(index) work_meta_info_obj = {} work_meta_info_obj["title"] = "closest_lay_title_" + index_str work_meta_info_obj["confidence"] = "max_confidence_" + index_str work_meta_info_obj["match"] = "is_match_" + index_str 
work_meta_info_obj["from"] = work_prefix + index_str + "Duration From" work_meta_info_obj["to"] = work_prefix + index_str + "Duration To" work_meta_info.append(work_meta_info_obj) major_title_dict = read_start_titles_and_end_titles( top_k_start=30, top_k_end=50, depriotize_starts=depriotize_starts) print "major title dict created successfully" test1 = {} test2 = {} major_keys = major_title_dict.keys() middle_index = len(major_title_dict) / 2 for i in range(middle_index + 1): test1[major_keys[i]] = major_title_dict[major_keys[i]] for i in range(middle_index + 1, len(major_keys)): test2[major_keys[i]] = major_title_dict[major_keys[i]] del major_title_dict func = partial(print_paths, depriotize_starts, test1, test2, degrees, work_meta_info, min_conf, 5, 30, 3) p = Pool(2) p.map(func, [1, 2]) p.close() print "Done" if __name__ == "__main__": path_main_function_parallel()
gpl-3.0
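generate_career_paths_3_test_with_global_graph2.py above combines major-specific ("local") transition counts with corpus-wide ("global") ones: local counts are scaled by a factor (10 in manage_edges) so the two are comparable, and create_graph turns the combined count into an inverse weight so frequent transitions become short edges. A rough sketch of that idea follows; the job titles, counts, and scale factor usage are illustrative only, not the script's exact thresholds.

import networkx as nx

LOCAL_SCALE = 10  # factor applied to major-specific counts, as in manage_edges()
global_counts = {("Research Assistant", "Data Analyst"): 120,
                 ("Data Analyst", "Data Scientist"): 90}
local_counts = {("Research Assistant", "Data Analyst"): 4,
                ("Data Analyst", "Data Scientist"): 7}

graph = nx.DiGraph()
for edge in set(global_counts) | set(local_counts):
    combined = global_counts.get(edge, 0) + LOCAL_SCALE * local_counts.get(edge, 0)
    # Inverse weight: higher transition frequency -> cheaper edge for path search.
    graph.add_edge(edge[0], edge[1], weight=1.0 / combined)

print(nx.shortest_path(graph, "Research Assistant", "Data Scientist", weight="weight"))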
cellnopt/cellnopt
test/core/test_models.py
1
3795
from cno.core.models import Models import pandas as pd import numpy as np from easydev import TempFile def test_models(): data = np.array([[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1], [1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]]) columns = [u'EGF=PI3K', u'TNFa=PI3K', u'Jnk=cJun', u'PI3K=Akt', u'Raf=Mek', u'!Akt=Mek', u'Mek=p90RSK', u'Mek=Erk', u'Erk=Hsp27', u'TNFa=Jnk', u'TNFa=NFkB', u'TNFa=Hsp27', u'EGF=Raf', u'EGF^TNFa=PI3K', u'Raf^!Akt=Mek', u'Erk^TNFa=Hsp27'] df = pd.DataFrame(data, columns=columns) fh = TempFile() df.to_csv(fh.name) m1 = Models(df) m2 = Models(m1) m3 = Models(fh.name, index_col=0) # there is an index column with no name fh.delete() # trying a stupid constructor try: Models(1) assert False except: assert True return m1, m2 assert m1 == m2 assert m1 == m3 # plots m1.plot() m1.plot(1) m1.plot('cv') m1.errorbar() m1.heatmap() # exporters fh = TempFile() m1.to_csv(fh.name) fh.delete() fh = TempFile() m1.to_sif(fh.name) fh.delete() # m1 and m2 are identical. Adding them gets rid of duplicates so it should be # equal to itself. m1 == m1 + m2 def test_compare_two_models(): from cno.core.models import CompareTwoModels import pandas as pd m1 = pd.Series({'!erk=sos': 0, '!ikb=nfkb': 1, '!pi3k=gsk3': 1, '!tnfa=ikb': 1, '!tnfa^nfkb=ikb': 0, 'egf=pi3k': 0, 'egf=sos': 1, 'egf^!erk=sos': 0, 'map3k1=ap1': 0, 'map3k1=p38': 0, 'map3k1^tnfa=p38': 1, 'nfkb=ikb': 1, 'pi3k=map3k1': 0, 'pi3k^sos=map3k1': 0, 'raf1=erk': 1, 'sos=map3k1': 0, 'sos=raf1': 1, 'tnfa=p38': 0, 'tnfa=pi3k': 0, 'tnfa^egf=pi3k': 1} ) m2 = pd.Series({'!erk=sos': 0, '!ikb=nfkb': 1, '!pi3k=gsk3': 1, '!tnfa=ikb': 1, '!tnfa^nfkb=ikb': 0, 'egf=pi3k': 0, 'egf=sos': 1, 'egf^!erk=sos': 0, 'map3k1=ap1': 1, 'map3k1=p38': 1, 'map3k1^tnfa=p38': 1, 'nfkb=ikb': 1, 'pi3k=map3k1': 1, 'pi3k^sos=map3k1': 0, 'raf1=erk': 1, 'sos=map3k1': 0, 'sos=raf1': 1, 'tnfa=p38': 0, 'tnfa=pi3k': 0, 'tnfa^egf=pi3k': 0}) cm = CompareTwoModels(m1,m2) cm.plot_multigraph()
bsd-2-clause
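test_models.py above relies on the property that adding two identical Models collections removes duplicate rows, so the sum should equal either operand. A tiny illustration of the same idea with plain pandas (not cno.core.models, whose addition operator is assumed to behave like this deduplication):

import pandas as pd

# Two identical model tables, concatenated and deduplicated, give back the original.
df = pd.DataFrame([[1, 0, 1], [0, 1, 1]], columns=["a", "b", "c"])
combined = pd.concat([df, df]).drop_duplicates().reset_index(drop=True)
assert combined.equals(df)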
billbrod/spatial-frequency-preferences
sfp/testing.py
1
3961
#!/usr/bin/python """small script to test displays """ import numpy as np from psychopy import visual, event from psychopy.tools import imagetools import pandas as pd import argparse import warnings def create_alternating_stimuli(size): """create a square stimuli that alternates every other pixel black-white the point of this stimuli is to make sure you can resolve the individual pixels on whatever screen you're using. size: int. the size of the image, in pixels. image will be square """ assert int(size) == size, "Size must be an integer!" size = int(size) x = np.array(range(size)) / float(size) x, _ = np.meshgrid(x, x) # this is frequency * 2pi * x, but since our frequency is half the size, the 2s cancel. return np.cos(size * np.pi * x) def test_display(screen_size, stimulus=None, stimulus_description_csv=None, freqs=None, text=None): """create a psychopy window and display a stimulus if stimulus is None, display create_alternating_stimuli. if a filename ending in npy, load that array in and show that array. if it's an array, display that array """ if not hasattr(screen_size, "__iter__"): screen_size = [screen_size, screen_size] if isinstance(stimulus, str): stimulus = np.load(stimulus) if stimulus_description_csv is not None: stim_df = pd.read_csv(stimulus_description_csv) print(freqs) print(type(freqs)) if 'w_r' in stim_df.columns: stim_idx = stim_df[(stim_df.w_a==freqs[0]) & (stim_df.w_r==freqs[1])].index[0] else: stim_idx = stim_df[(stim_df.w_x==freqs[0]) & (stim_df.w_y==freqs[1])].index[0] stimulus = stimulus[stim_idx, :, :] elif stimulus is None and text is None: stimulus = create_alternating_stimuli(min(screen_size)) win = visual.Window(screen_size, fullscr=True, screen=1, colorSpace='rgb255', color=127, units='pix') if text is None: if stimulus.ndim > 2: warnings.warn("stimulus is more than 2d, assuming it's three and [0,:,:] ...") stimulus = stimulus[0, :, :] stim_shape = stimulus.shape if stimulus.min() < -1 or stimulus.max() > 1: stimulus = imagetools.array2image(stimulus) thing_to_display = visual.ImageStim(win, stimulus, size=stim_shape) else: thing_to_display = visual.TextStim(win, text) thing_to_display.draw() win.flip() all_keys = event.waitKeys(keyList=['q', 'escape']) if 'q' in [k[0] for k in all_keys] or 'escape' in [k[0] for k in all_keys]: win.close() if __name__ == '__main__': parser = argparse.ArgumentParser(description="Test your display") parser.add_argument("screen_size", help="Screen size, in pixels. Can be one or two integers", nargs='+', type=int) parser.add_argument("--stimulus", "-s", help=("Optional, path to stimulus. If not used, will create alternating " "black and white pixels the size of the screen")) parser.add_argument("--stimulus_description_csv", '-d', help=("Optional, path to csv containing description of stimuli. Used with" " --freqs arg to find stimuli with specified frequency")) parser.add_argument("--freqs", '-f', nargs=2, type=float, help=("Optional, 2 floats specifying the frequency of the stimulus to " "display. Should be either w_x, w_y or w_a, w_r")) parser.add_argument("--text", "-t", type=str, help=("Optional, text to display. If set, will not show a grating but " "instead whatever text you enter. Text can be easier to check for " "blur")) args = vars(parser.parse_args()) test_display(**args)
mit
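sfp/testing.py builds its pixel-resolution test pattern from cos(size * pi * x) with x[k] = k / size, so each column evaluates to cos(pi * k) = (-1)**k and the image alternates between +1 and -1 every pixel. A quick numpy check of that reasoning from create_alternating_stimuli:

import numpy as np

size = 8
x = np.arange(size) / float(size)
row = np.cos(size * np.pi * x)  # cos(pi * k) for k = 0..size-1
assert np.allclose(row, [(-1) ** k for k in range(size)])
print(row)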
kvh/ramp
ramp/store.py
1
6218
 # -*- coding: utf-8 -*- ''' Store ------- Data storage classes. The default behavior for this module will always attempt to read/write from HDF storage first, and will fall back to pickle storage if required. This module uses the MD5 algorithm to create "safe" unique file names based on provided key values. ''' import cPickle as pickle import hashlib import os import re import pandas try: import tables from tables.exceptions import NoSuchNodeError except ImportError: NoSuchNodeError = None __all__ = ['Storable'] def dumppickle(obj, fname, protocol=-1): """ Pickle object `obj` to file `fname`. """ with open(fname, 'wb') as fout: # 'b' for binary, needed on Windows pickle.dump(obj, fout, protocol=protocol) def loadpickle(fname): """ Load pickled object from `fname` """ return pickle.load(open(fname, 'rb')) class Storable(object): def to_pickle(self, path): dumppickle(self, path) @classmethod def from_pickle(cls, path): obj = loadpickle(path) assert type(obj) == cls return obj def to_string(self): pass def to_hdf5(self, path): pass def to_hdf5_or_pickle(self, path): pass # class DummyStore(object): # def save(self, k, v): # pass # def load(self, k): # raise KeyError # def delete(self, kp): # pass # class Store(object): # def __init__(self, path=None, verbose=False): # """ # ABC for Store classes. Inheriting classes should override get # and put methods. Currently subclasses for HDF5 and cPickle, but # extendable for other data storage types. # Parameters: # ----------- # path: string, default None # Path to data folder # verbose: bool, default False # Set 'True' to print read/write messages # """ # self.path = path # self._shelf = None # self._uncachables = set() # self._cache = {} # self.verbose = verbose # def register_uncachable(self, un): # """Any key containing the substring `un` will NOT be cached """ # self._uncachables.add(un) # def load(self, key): # """ # Loads from cache, otherwise defaults to class 'get' method to load # from store. # """ # try: # v = self._cache[key] # if self.verbose: # print "Retrieving '%s' from local" % key # return v # except KeyError: # v = self.get(key) # if self.verbose: # print "Retrieving '%s' from store" % key # return v # def save(self, key, value): # """ # Saves to cache, otherwise defaults to class 'put' method to load # from store # """ # for un in self._uncachables: # if un in key: # # print "not caching", key # return # self.put(key, value) # self._cache[key] = value # def get(self, key): # raise NotImplementedError # def put(self, key, value): # raise NotImplementedError # class MemoryStore(Store): # """ # Caches values in-memory, no persistence. # """ # def put(self, key, value): pass # def get(self, key): raise KeyError # re_file = re.compile(r'\W+') # class PickleStore(Store): # """ # Pickles values to disk and caches in memory. # """ # def safe_name(self, key): # """Create hex name from key""" # key_name = re_file.sub('_', key) # return '_%s__%s' % (hashlib.md5(key).hexdigest()[:10], key_name[:100]) # def get_fname(self, key): # """Get pickled data path""" # return os.path.join(self.path, self.safe_name(key)) # def put(self, key, value): # """Write safe-named data to pickle""" # dumppickle(value, self.get_fname(key), protocol=0) # def get(self, key): # """Load pickled data using key value""" # try: # return loadpickle(self.get_fname(key)) # except IOError: # raise KeyError # class HDFPickleStore(PickleStore): # """ # Attempts to store objects in HDF5 format (numpy/pandas objects). 
Pickles them # to disk if that's not possible; also caches values in-memory. # """ # def get_store(self): # """HDF store on self.path""" # return pandas.HDFStore(os.path.join(self.path, 'ramp.h5')) # def put(self, key, value): # """Write Pandas DataFrame or Series to HDF store. Other data types # will default to pickled storage""" # if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series): # self.get_store()[self.safe_name(key)] = value # else: # super(HDFPickleStore, self).put(key, value) # def get(self, key): # """Get data from HDF store. If store does not contain key or data, # will try to load pickled data.""" # try: # return self.get_store()[self.safe_name(key)] # except (KeyError, NoSuchNodeError): # pass # return super(HDFPickleStore, self).get(key) # class ShelfStore(Store): # """ # Deprecated # """ # def get_store(self): # if self._shelf is None: # self._shelf = shelve.open(self.path) # return self._shelf # def delete(self, keypart): # s = self.get_store() # # TODO: iterating keys is stupid slow for a shelf # for k in s.keys(): # if keypart in k: # if self.verbose: # print "Deleting '%s' from store"%k # del s[k] # def put(self, key, value): # store = self.get_store() # store[key] = value # self._cache[key] = value # def get(self, key): # return self.get_store()[key] # try: # tables # default_store = HDFPickleStore # except NameError: # print "Defaulting to basic pickle store. It is recommended \ # you install PyTables for fast HDF5 format." # default_store = PickleStore
mit
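ramp/store.py describes MD5-based "safe" unique file names for cache keys (see the commented-out PickleStore.safe_name). The sketch below reproduces that scheme in Python 3; the explicit encode() is an adaptation, since the original module targets Python 2 strings.

import hashlib
import re

def safe_name(key, digest_len=10, name_len=100):
    # Non-word characters become underscores for readability; the MD5 prefix keeps
    # distinct keys from colliding after the readable part is truncated.
    readable = re.sub(r"\W+", "_", key)[:name_len]
    digest = hashlib.md5(key.encode("utf-8")).hexdigest()[:digest_len]
    return "_%s__%s" % (digest, readable)

print(safe_name("feature: log(Sales) / lag-1"))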
JazzeYoung/VeryDeepAutoEncoder
pylearn2/models/tests/test_svm.py
28
1398
"""Tests for DenseMulticlassSVM""" from __future__ import print_function from pylearn2.datasets.mnist import MNIST from pylearn2.testing.skip import skip_if_no_sklearn, skip_if_no_data import numpy as np from theano.compat.six.moves import xrange import unittest DenseMulticlassSVM = None class TestSVM(unittest.TestCase): """ Test class for DenseMulticlassSVM Parameters ---------- Inherited from unittest.TestCase """ def setUp(self): """ Set up test for DenseMulticlassSVM. Imports DenseMulticlassSVM if available, skips the test otherwise. """ global DenseMulticlassSVM skip_if_no_sklearn() skip_if_no_data() import pylearn2.models.svm DenseMulticlassSVM = pylearn2.models.svm.DenseMulticlassSVM def test_decision_function(self): """ Test DenseMulticlassSVM.decision_function. """ dataset = MNIST(which_set='train') X = dataset.X[0:20, :] y = dataset.y[0:20] for i in xrange(10): assert (y == i).sum() > 0 model = DenseMulticlassSVM(kernel='poly', C=1.0).fit(X, y) f = model.decision_function(X) print(f) yhat_f = np.argmax(f, axis=1) yhat = np.cast[yhat_f.dtype](model.predict(X)) print(yhat_f) print(yhat) assert (yhat_f != yhat).sum() == 0
bsd-3-clause
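test_svm.py above asserts that the argmax over DenseMulticlassSVM.decision_function agrees with predict(). The same relationship can be checked with scikit-learn's one-vs-rest LinearSVC on a small built-in dataset, used here instead of pylearn2 and MNIST so the snippet stays self-contained.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)
model = LinearSVC(C=1.0, max_iter=10000).fit(X, y)
scores = model.decision_function(X)  # shape (n_samples, n_classes)
# For multiclass LinearSVC, predict() is exactly the argmax of the decision scores.
assert np.array_equal(model.classes_[np.argmax(scores, axis=1)], model.predict(X))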
gengliangwang/spark
python/pyspark/sql/tests/test_arrow.py
15
27974
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import os import threading import time import unittest import warnings from distutils.version import LooseVersion from pyspark import SparkContext, SparkConf from pyspark.sql import Row, SparkSession from pyspark.sql.functions import rand, udf from pyspark.sql.types import StructType, StringType, IntegerType, LongType, \ FloatType, DoubleType, DecimalType, DateType, TimestampType, BinaryType, StructField, \ ArrayType, NullType from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \ pandas_requirement_message, pyarrow_requirement_message from pyspark.testing.utils import QuietTest if have_pandas: import pandas as pd from pandas.testing import assert_frame_equal if have_pyarrow: import pyarrow as pa # noqa: F401 @unittest.skipIf( not have_pandas or not have_pyarrow, pandas_requirement_message or pyarrow_requirement_message) # type: ignore class ArrowTests(ReusedSQLTestCase): @classmethod def setUpClass(cls): from datetime import date, datetime from decimal import Decimal super(ArrowTests, cls).setUpClass() cls.warnings_lock = threading.Lock() # Synchronize default timezone between Python and Java cls.tz_prev = os.environ.get("TZ", None) # save current tz if set tz = "America/Los_Angeles" os.environ["TZ"] = tz time.tzset() cls.spark.conf.set("spark.sql.session.timeZone", tz) # Test fallback cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false") assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false" cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true") assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true" cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true") assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true" cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false") assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false" # Enable Arrow optimization in this tests. cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true") # Disable fallback by default to easily detect the failures. 
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false") cls.schema_wo_null = StructType([ StructField("1_str_t", StringType(), True), StructField("2_int_t", IntegerType(), True), StructField("3_long_t", LongType(), True), StructField("4_float_t", FloatType(), True), StructField("5_double_t", DoubleType(), True), StructField("6_decimal_t", DecimalType(38, 18), True), StructField("7_date_t", DateType(), True), StructField("8_timestamp_t", TimestampType(), True), StructField("9_binary_t", BinaryType(), True)]) cls.schema = cls.schema_wo_null.add("10_null_t", NullType(), True) cls.data_wo_null = [ (u"a", 1, 10, 0.2, 2.0, Decimal("2.0"), date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")), (u"b", 2, 20, 0.4, 4.0, Decimal("4.0"), date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")), (u"c", 3, 30, 0.8, 6.0, Decimal("6.0"), date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")), (u"d", 4, 40, 1.0, 8.0, Decimal("8.0"), date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd")), ] cls.data = [tuple(list(d) + [None]) for d in cls.data_wo_null] @classmethod def tearDownClass(cls): del os.environ["TZ"] if cls.tz_prev is not None: os.environ["TZ"] = cls.tz_prev time.tzset() super(ArrowTests, cls).tearDownClass() def create_pandas_data_frame(self): import numpy as np data_dict = {} for j, name in enumerate(self.schema.names): data_dict[name] = [self.data[i][j] for i in range(len(self.data))] # need to convert these to numpy types first data_dict["2_int_t"] = np.int32(data_dict["2_int_t"]) data_dict["4_float_t"] = np.float32(data_dict["4_float_t"]) return pd.DataFrame(data=data_dict) def test_toPandas_fallback_enabled(self): ts = datetime.datetime(2015, 11, 1, 0, 30) with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}): schema = StructType([StructField("a", ArrayType(TimestampType()), True)]) df = self.spark.createDataFrame([([ts],)], schema=schema) with QuietTest(self.sc): with self.warnings_lock: with warnings.catch_warnings(record=True) as warns: # we want the warnings to appear even if this test is run from a subclass warnings.simplefilter("always") pdf = df.toPandas() # Catch and check the last UserWarning. 
user_warns = [ warn.message for warn in warns if isinstance(warn.message, UserWarning)] self.assertTrue(len(user_warns) > 0) self.assertTrue( "Attempting non-optimization" in str(user_warns[-1])) assert_frame_equal(pdf, pd.DataFrame({"a": [[ts]]})) def test_toPandas_fallback_disabled(self): schema = StructType([StructField("a", ArrayType(TimestampType()), True)]) df = self.spark.createDataFrame([(None,)], schema=schema) with QuietTest(self.sc): with self.warnings_lock: with self.assertRaisesRegex(Exception, 'Unsupported type'): df.toPandas() def test_null_conversion(self): df_null = self.spark.createDataFrame( [tuple([None for _ in range(len(self.data_wo_null[0]))])] + self.data_wo_null) pdf = df_null.toPandas() null_counts = pdf.isnull().sum().tolist() self.assertTrue(all([c == 1 for c in null_counts])) def _toPandas_arrow_toggle(self, df): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): pdf = df.toPandas() pdf_arrow = df.toPandas() return pdf, pdf_arrow def test_toPandas_arrow_toggle(self): df = self.spark.createDataFrame(self.data, schema=self.schema) pdf, pdf_arrow = self._toPandas_arrow_toggle(df) expected = self.create_pandas_data_frame() assert_frame_equal(expected, pdf) assert_frame_equal(expected, pdf_arrow) def test_toPandas_respect_session_timezone(self): df = self.spark.createDataFrame(self.data, schema=self.schema) timezone = "America/Los_Angeles" with self.sql_conf({"spark.sql.session.timeZone": timezone}): pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df) assert_frame_equal(pdf_arrow_la, pdf_la) timezone = "America/New_York" with self.sql_conf({"spark.sql.session.timeZone": timezone}): pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df) assert_frame_equal(pdf_arrow_ny, pdf_ny) self.assertFalse(pdf_ny.equals(pdf_la)) from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz pdf_la_corrected = pdf_la.copy() for field in self.schema: if isinstance(field.dataType, TimestampType): pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz( pdf_la_corrected[field.name], timezone) assert_frame_equal(pdf_ny, pdf_la_corrected) def test_pandas_round_trip(self): pdf = self.create_pandas_data_frame() df = self.spark.createDataFrame(self.data, schema=self.schema) pdf_arrow = df.toPandas() assert_frame_equal(pdf_arrow, pdf) def test_pandas_self_destruct(self): import pyarrow as pa rows = 2 ** 10 cols = 4 expected_bytes = rows * cols * 8 df = self.spark.range(0, rows).select(*[rand() for _ in range(cols)]) # Test the self_destruct behavior by testing _collect_as_arrow directly allocation_before = pa.total_allocated_bytes() batches = df._collect_as_arrow(split_batches=True) table = pa.Table.from_batches(batches) del batches pdf_split = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False) allocation_after = pa.total_allocated_bytes() difference = allocation_after - allocation_before # Should be around 1x the data size (table should not hold on to any memory) self.assertGreaterEqual(difference, 0.9 * expected_bytes) self.assertLessEqual(difference, 1.1 * expected_bytes) with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": False}): no_self_destruct_pdf = df.toPandas() # Note while memory usage is 2x data size here (both table and pdf hold on to # memory), in this case Arrow still only tracks 1x worth of memory (since the # batches are not allocated by Arrow in this case), so we can't make any # assertions here with 
self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": True}): self_destruct_pdf = df.toPandas() assert_frame_equal(pdf_split, no_self_destruct_pdf) assert_frame_equal(pdf_split, self_destruct_pdf) def test_filtered_frame(self): df = self.spark.range(3).toDF("i") pdf = df.filter("i < 0").toPandas() self.assertEqual(len(pdf.columns), 1) self.assertEqual(pdf.columns[0], "i") self.assertTrue(pdf.empty) def test_no_partition_frame(self): schema = StructType([StructField("field1", StringType(), True)]) df = self.spark.createDataFrame(self.sc.emptyRDD(), schema) pdf = df.toPandas() self.assertEqual(len(pdf.columns), 1) self.assertEqual(pdf.columns[0], "field1") self.assertTrue(pdf.empty) def test_propagates_spark_exception(self): df = self.spark.range(3).toDF("i") def raise_exception(): raise RuntimeError("My error") exception_udf = udf(raise_exception, IntegerType()) df = df.withColumn("error", exception_udf()) with QuietTest(self.sc): with self.assertRaisesRegex(Exception, 'My error'): df.toPandas() def _createDataFrame_toggle(self, pdf, schema=None): with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): df_no_arrow = self.spark.createDataFrame(pdf, schema=schema) df_arrow = self.spark.createDataFrame(pdf, schema=schema) return df_no_arrow, df_arrow def test_createDataFrame_toggle(self): pdf = self.create_pandas_data_frame() df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema) self.assertEqual(df_no_arrow.collect(), df_arrow.collect()) def test_createDataFrame_respect_session_timezone(self): from datetime import timedelta pdf = self.create_pandas_data_frame() timezone = "America/Los_Angeles" with self.sql_conf({"spark.sql.session.timeZone": timezone}): df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema) result_la = df_no_arrow_la.collect() result_arrow_la = df_arrow_la.collect() self.assertEqual(result_la, result_arrow_la) timezone = "America/New_York" with self.sql_conf({"spark.sql.session.timeZone": timezone}): df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema) result_ny = df_no_arrow_ny.collect() result_arrow_ny = df_arrow_ny.collect() self.assertEqual(result_ny, result_arrow_ny) self.assertNotEqual(result_ny, result_la) # Correct result_la by adjusting 3 hours difference between Los Angeles and New York result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v for k, v in row.asDict().items()}) for row in result_la] self.assertEqual(result_ny, result_la_corrected) def test_createDataFrame_with_schema(self): pdf = self.create_pandas_data_frame() df = self.spark.createDataFrame(pdf, schema=self.schema) self.assertEqual(self.schema, df.schema) pdf_arrow = df.toPandas() assert_frame_equal(pdf_arrow, pdf) def test_createDataFrame_with_incorrect_schema(self): pdf = self.create_pandas_data_frame() fields = list(self.schema) fields[5], fields[6] = fields[6], fields[5] # swap decimal with date wrong_schema = StructType(fields) with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}): with QuietTest(self.sc): with self.assertRaisesRegex(Exception, "[D|d]ecimal.*got.*date"): self.spark.createDataFrame(pdf, schema=wrong_schema) def test_createDataFrame_with_names(self): pdf = self.create_pandas_data_frame() new_names = list(map(str, range(len(self.schema.fieldNames())))) # Test that schema as a list of column names gets applied df = self.spark.createDataFrame(pdf, schema=list(new_names)) 
self.assertEqual(df.schema.fieldNames(), new_names) # Test that schema as tuple of column names gets applied df = self.spark.createDataFrame(pdf, schema=tuple(new_names)) self.assertEqual(df.schema.fieldNames(), new_names) def test_createDataFrame_column_name_encoding(self): pdf = pd.DataFrame({u'a': [1]}) columns = self.spark.createDataFrame(pdf).columns self.assertTrue(isinstance(columns[0], str)) self.assertEqual(columns[0], 'a') columns = self.spark.createDataFrame(pdf, [u'b']).columns self.assertTrue(isinstance(columns[0], str)) self.assertEqual(columns[0], 'b') def test_createDataFrame_with_single_data_type(self): with QuietTest(self.sc): with self.assertRaisesRegex(ValueError, ".*IntegerType.*not supported.*"): self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int") def test_createDataFrame_does_not_modify_input(self): # Some series get converted for Spark to consume, this makes sure input is unchanged pdf = self.create_pandas_data_frame() # Use a nanosecond value to make sure it is not truncated pdf.iloc[0, 7] = pd.Timestamp(1) # Integers with nulls will get NaNs filled with 0 and will be casted pdf.iloc[1, 1] = None pdf_copy = pdf.copy(deep=True) self.spark.createDataFrame(pdf, schema=self.schema) self.assertTrue(pdf.equals(pdf_copy)) def test_schema_conversion_roundtrip(self): from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema arrow_schema = to_arrow_schema(self.schema) schema_rt = from_arrow_schema(arrow_schema) self.assertEqual(self.schema, schema_rt) def test_createDataFrame_with_array_type(self): pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]}) df, df_arrow = self._createDataFrame_toggle(pdf) result = df.collect() result_arrow = df_arrow.collect() expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)] for r in range(len(expected)): for e in range(len(expected[r])): self.assertTrue(expected[r][e] == result_arrow[r][e] and result[r][e] == result_arrow[r][e]) def test_toPandas_with_array_type(self): expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])] array_schema = StructType([StructField("a", ArrayType(IntegerType())), StructField("b", ArrayType(StringType()))]) df = self.spark.createDataFrame(expected, schema=array_schema) pdf, pdf_arrow = self._toPandas_arrow_toggle(df) result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)] result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)] for r in range(len(expected)): for e in range(len(expected[r])): self.assertTrue(expected[r][e] == result_arrow[r][e] and result[r][e] == result_arrow[r][e]) def test_createDataFrame_with_map_type(self): map_data = [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}] pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": map_data}) schema = "id long, m map<string, long>" with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): df = self.spark.createDataFrame(pdf, schema=schema) if LooseVersion(pa.__version__) < LooseVersion("2.0.0"): with QuietTest(self.sc): with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"): self.spark.createDataFrame(pdf, schema=schema) else: df_arrow = self.spark.createDataFrame(pdf, schema=schema) result = df.collect() result_arrow = df_arrow.collect() self.assertEqual(len(result), len(result_arrow)) for row, row_arrow in zip(result, result_arrow): i, m = row _, m_arrow = row_arrow self.assertEqual(m, map_data[i]) self.assertEqual(m_arrow, map_data[i]) def test_toPandas_with_map_type(self): pdf = 
pd.DataFrame({"id": [0, 1, 2, 3], "m": [{}, {"a": 1}, {"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}]}) with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>") if LooseVersion(pa.__version__) < LooseVersion("2.0.0"): with QuietTest(self.sc): with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"): df.toPandas() else: pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df) assert_frame_equal(pdf_arrow, pdf_non) def test_toPandas_with_map_type_nulls(self): pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]}) with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>") if LooseVersion(pa.__version__) < LooseVersion("2.0.0"): with QuietTest(self.sc): with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"): df.toPandas() else: pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df) assert_frame_equal(pdf_arrow, pdf_non) def test_createDataFrame_with_int_col_names(self): import numpy as np pdf = pd.DataFrame(np.random.rand(4, 2)) df, df_arrow = self._createDataFrame_toggle(pdf) pdf_col_names = [str(c) for c in pdf.columns] self.assertEqual(pdf_col_names, df.columns) self.assertEqual(pdf_col_names, df_arrow.columns) def test_createDataFrame_fallback_enabled(self): ts = datetime.datetime(2015, 11, 1, 0, 30) with QuietTest(self.sc): with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}): with warnings.catch_warnings(record=True) as warns: # we want the warnings to appear even if this test is run from a subclass warnings.simplefilter("always") df = self.spark.createDataFrame( pd.DataFrame({"a": [[ts]]}), "a: array<timestamp>") # Catch and check the last UserWarning. 
user_warns = [ warn.message for warn in warns if isinstance(warn.message, UserWarning)] self.assertTrue(len(user_warns) > 0) self.assertTrue( "Attempting non-optimization" in str(user_warns[-1])) self.assertEqual(df.collect(), [Row(a=[ts])]) def test_createDataFrame_fallback_disabled(self): with QuietTest(self.sc): with self.assertRaisesRegex(TypeError, 'Unsupported type'): self.spark.createDataFrame( pd.DataFrame({"a": [[datetime.datetime(2015, 11, 1, 0, 30)]]}), "a: array<timestamp>") # Regression test for SPARK-23314 def test_timestamp_dst(self): # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am dt = [datetime.datetime(2015, 11, 1, 0, 30), datetime.datetime(2015, 11, 1, 1, 30), datetime.datetime(2015, 11, 1, 2, 30)] pdf = pd.DataFrame({'time': dt}) df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time') df_from_pandas = self.spark.createDataFrame(pdf) assert_frame_equal(pdf, df_from_python.toPandas()) assert_frame_equal(pdf, df_from_pandas.toPandas()) # Regression test for SPARK-28003 def test_timestamp_nat(self): dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100 pdf = pd.DataFrame({'time': dt}) df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf) assert_frame_equal(pdf, df_no_arrow.toPandas()) assert_frame_equal(pdf, df_arrow.toPandas()) def test_toPandas_batch_order(self): def delay_first_part(partition_index, iterator): if partition_index == 0: time.sleep(0.1) return iterator # Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python def run_test(num_records, num_parts, max_records, use_delay=False): df = self.spark.range(num_records, numPartitions=num_parts).toDF("a") if use_delay: df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF() with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}): pdf, pdf_arrow = self._toPandas_arrow_toggle(df) assert_frame_equal(pdf, pdf_arrow) cases = [ (1024, 512, 2), # Use large num partitions for more likely collecting out of order (64, 8, 2, True), # Use delay in first partition to force collecting out of order (64, 64, 1), # Test single batch per partition (64, 1, 64), # Test single partition, single batch (64, 1, 8), # Test single partition, multiple batches (30, 7, 2), # Test different sized partitions ] for case in cases: run_test(*case) def test_createDateFrame_with_category_type(self): pdf = pd.DataFrame({"A": [u"a", u"b", u"c", u"a"]}) pdf["B"] = pdf["A"].astype('category') category_first_element = dict(enumerate(pdf['B'].cat.categories))[0] with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}): arrow_df = self.spark.createDataFrame(pdf) arrow_type = arrow_df.dtypes[1][1] result_arrow = arrow_df.toPandas() arrow_first_category_element = result_arrow["B"][0] with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}): df = self.spark.createDataFrame(pdf) spark_type = df.dtypes[1][1] result_spark = df.toPandas() spark_first_category_element = result_spark["B"][0] assert_frame_equal(result_spark, result_arrow) # ensure original category elements are string self.assertIsInstance(category_first_element, str) # spark data frame and arrow execution mode enabled data frame type must match pandas self.assertEqual(spark_type, 'string') self.assertEqual(arrow_type, 'string') self.assertIsInstance(arrow_first_category_element, str) self.assertIsInstance(spark_first_category_element, str) def test_createDataFrame_with_float_index(self): # SPARK-32098: float index should not produce duplicated or truncated Spark DataFrame 
self.assertEqual( self.spark.createDataFrame( pd.DataFrame({'a': [1, 2, 3]}, index=[2., 3., 4.])).distinct().count(), 3) def test_no_partition_toPandas(self): # SPARK-32301: toPandas should work from a Spark DataFrame with no partitions # Forward-ported from SPARK-32300. pdf = self.spark.sparkContext.emptyRDD().toDF("col1 int").toPandas() self.assertEqual(len(pdf), 0) self.assertEqual(list(pdf.columns), ["col1"]) def test_createDataFrame_empty_partition(self): pdf = pd.DataFrame({"c1": [1], "c2": ["string"]}) df = self.spark.createDataFrame(pdf) self.assertEqual([Row(c1=1, c2='string')], df.collect()) self.assertGreater(self.spark.sparkContext.defaultParallelism, len(pdf)) @unittest.skipIf( not have_pandas or not have_pyarrow, pandas_requirement_message or pyarrow_requirement_message) # type: ignore class MaxResultArrowTests(unittest.TestCase): # These tests are separate as 'spark.driver.maxResultSize' configuration # is a static configuration to Spark context. @classmethod def setUpClass(cls): cls.spark = SparkSession(SparkContext( 'local[4]', cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k"))) # Explicitly enable Arrow and disable fallback. cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true") cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false") @classmethod def tearDownClass(cls): if hasattr(cls, "spark"): cls.spark.stop() def test_exception_by_max_results(self): with self.assertRaisesRegex(Exception, "is bigger than"): self.spark.range(0, 10000, 1, 100).toPandas() class EncryptionArrowTests(ArrowTests): @classmethod def conf(cls): return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true") if __name__ == "__main__": from pyspark.sql.tests.test_arrow import * # noqa: F401 try: import xmlrunner # type: ignore testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
apache-2.0
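The tests above repeatedly flip spark.sql.execution.arrow.pyspark.enabled and check that the Arrow and non-Arrow code paths agree. A minimal standalone sketch of that pattern, assuming only a local pyspark installation with pandas and pyarrow available; the session name and toy data are illustrative and not taken from the suite:

# Minimal sketch: compare toPandas() with and without Arrow. Assumes a local
# SparkSession and that pandas/pyarrow are installed; not part of the test suite.
import pandas as pd
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master("local[2]")
         .appName("arrow-toggle-sketch")
         .getOrCreate())
df = spark.createDataFrame(pd.DataFrame({"id": [0, 1, 2], "v": [0.1, 0.2, 0.3]}))

spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "false")
pdf_plain = df.toPandas()

spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
pdf_arrow = df.toPandas()

# Both code paths should yield the same pandas DataFrame.
pd.testing.assert_frame_equal(pdf_plain, pdf_arrow)
spark.stop()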
chintak/face_detection
models.py
1
15942
import numpy as np import os import theano import theano.tensor as T import lasagne from lasagne import layers from lasagne.init import Orthogonal from lasagne.updates import nesterov_momentum from nolearn.lasagne import NeuralNet from nolearn.lasagne import BatchIterator from lazy_batch_iterator import LazyBatchIterator from augment_batch_iterator import AugmentBatchIterator from helper import AdjustVariable, StepVariableUpdate from sklearn.metrics import mean_squared_error from objectives import iou_loss_val, smooth_l1_loss, smooth_l1_loss_val from joblib import Parallel, delayed NET_CONFIGS = [ 'config_4c_1234_3d_smoothl1_lr_step', 'config_4c_1234_3d_smoothl1_lr_linear', 'config_4c_1234_3d_squaredloss_lr_linear', 'config_4c_1233_3d' ] def config_4c_1234_3d_smoothl1_lr_step(batch_iterator="BatchIterator", max_epochs=30): custom_batch_iterator = globals()[batch_iterator] net1 = NeuralNet( layers=[ ('input', layers.InputLayer), ('conv1_1', layers.Conv2DLayer), ('conv1_2', layers.Conv2DLayer), ('pool1', layers.MaxPool2DLayer), ('conv2_1', layers.Conv2DLayer), ('conv2_2', layers.Conv2DLayer), ('pool2', layers.MaxPool2DLayer), ('conv3_1', layers.Conv2DLayer), ('conv3_2', layers.Conv2DLayer), ('conv3_3', layers.Conv2DLayer), ('pool3', layers.MaxPool2DLayer), ('conv4_1', layers.Conv2DLayer), ('conv4_2', layers.Conv2DLayer), ('conv4_3', layers.Conv2DLayer), ('conv4_4', layers.Conv2DLayer), ('pool4', layers.MaxPool2DLayer), ('dense1', layers.DenseLayer), # ('drop1', layers.DropoutLayer), ('dense2', layers.DenseLayer), # ('drop2', layers.DropoutLayer), ('dense3', layers.DenseLayer), # ('drop3', layers.DropoutLayer), ('output', layers.DenseLayer), ], # layer parameters: input_shape=(None, 3, 256, 256), conv1_1_num_filters=86, conv1_1_filter_size=(5, 5), conv1_1_stride=(2, 2), conv1_2_num_filters=104, conv1_2_filter_size=(3, 3), conv1_2_pad=(1, 1), pool1_pool_size=(2, 2), pool1_stride=(2, 2), conv2_1_num_filters=128, conv2_1_filter_size=(3, 3), conv2_1_pad=(1, 1), conv2_2_num_filters=128, conv2_2_filter_size=(3, 3), conv2_2_pad=(1, 1), pool2_pool_size=(3, 3), pool2_stride=(2, 2), conv3_1_num_filters=256, conv3_1_filter_size=(3, 3), conv3_1_pad=(1, 1), conv3_2_num_filters=256, conv3_2_filter_size=(3, 3), conv3_2_pad=(1, 1), conv3_3_num_filters=256, conv3_3_filter_size=(3, 3), conv3_3_pad=(1, 1), pool3_pool_size=(3, 3), pool3_stride=(2, 2), conv4_1_num_filters=196, conv4_1_filter_size=(3, 3), conv4_1_pad=(1, 1), conv4_2_num_filters=196, conv4_2_filter_size=(3, 3), conv4_2_pad=(1, 1), conv4_3_num_filters=196, conv4_3_filter_size=(3, 3), conv4_3_pad=(1, 1), conv4_4_num_filters=196, conv4_4_filter_size=(3, 3), conv4_4_pad=(1, 1), pool4_pool_size=(2, 2), pool4_stride=(2, 2), conv1_1_W=Orthogonal(gain=1.0), conv1_2_W=Orthogonal(gain=1.0), conv2_1_W=Orthogonal(gain=1.0), conv2_2_W=Orthogonal(gain=1.0), conv3_1_W=Orthogonal(gain=1.0), conv3_2_W=Orthogonal(gain=1.0), conv3_3_W=Orthogonal(gain=1.0), conv4_1_W=Orthogonal(gain=1.0), conv4_2_W=Orthogonal(gain=1.0), conv4_3_W=Orthogonal(gain=1.0), conv4_4_W=Orthogonal(gain=1.0), dense1_num_units=4096, # drop1_p=0.5, dense2_num_units=2048, # drop2_p=0.5, dense3_num_units=512, # drop3_p=0.5, dense3_nonlinearity=lasagne.nonlinearities.sigmoid, # output layer uses identity function output_nonlinearity=None, output_num_units=4, # optimization method: update=nesterov_momentum, update_learning_rate=theano.shared(np.float32(0.0001)), update_momentum=theano.shared(np.float32(0.9)), batch_iterator_train=custom_batch_iterator(batch_size=72), 
batch_iterator_test=custom_batch_iterator(batch_size=48), on_epoch_finished=[ StepVariableUpdate('update_learning_rate', changes={ 30: 0.00005 }), AdjustVariable('update_momentum', start=0.9, stop=0.98) ], objective_loss_function=smooth_l1_loss, # objective_loss_function=iou_loss, custom_scores=[ # ('smoothl1', smooth_l1_loss_val), ('iou_loss', iou_loss_val), ('squared_error', mean_squared_error) ], regression=True, max_epochs=max_epochs, verbose=1, ) return net1 def config_4c_1234_3d_smoothl1_lr_linear(batch_iterator="BatchIterator", max_epochs=30): custom_batch_iterator = globals()[batch_iterator] net1 = NeuralNet( layers=[ ('input', layers.InputLayer), ('conv1_1', layers.Conv2DLayer), ('conv1_2', layers.Conv2DLayer), ('pool1', layers.MaxPool2DLayer), ('conv2_1', layers.Conv2DLayer), ('conv2_2', layers.Conv2DLayer), ('pool2', layers.MaxPool2DLayer), ('conv3_1', layers.Conv2DLayer), ('conv3_2', layers.Conv2DLayer), ('conv3_3', layers.Conv2DLayer), ('pool3', layers.MaxPool2DLayer), ('conv4_1', layers.Conv2DLayer), ('conv4_2', layers.Conv2DLayer), ('conv4_3', layers.Conv2DLayer), ('conv4_4', layers.Conv2DLayer), ('pool4', layers.MaxPool2DLayer), ('dense1', layers.DenseLayer), # ('drop1', layers.DropoutLayer), ('dense2', layers.DenseLayer), # ('drop2', layers.DropoutLayer), ('dense3', layers.DenseLayer), # ('drop3', layers.DropoutLayer), ('output', layers.DenseLayer), ], # layer parameters: input_shape=(None, 3, 256, 256), conv1_1_num_filters=86, conv1_1_filter_size=(5, 5), conv1_1_stride=(2, 2), conv1_2_num_filters=104, conv1_2_filter_size=(3, 3), conv1_2_pad=(1, 1), pool1_pool_size=(2, 2), pool1_stride=(2, 2), conv2_1_num_filters=128, conv2_1_filter_size=(3, 3), conv2_1_pad=(1, 1), conv2_2_num_filters=128, conv2_2_filter_size=(3, 3), conv2_2_pad=(1, 1), pool2_pool_size=(3, 3), pool2_stride=(2, 2), conv3_1_num_filters=256, conv3_1_filter_size=(3, 3), conv3_1_pad=(1, 1), conv3_2_num_filters=256, conv3_2_filter_size=(3, 3), conv3_2_pad=(1, 1), conv3_3_num_filters=256, conv3_3_filter_size=(3, 3), conv3_3_pad=(1, 1), pool3_pool_size=(3, 3), pool3_stride=(2, 2), conv4_1_num_filters=196, conv4_1_filter_size=(3, 3), conv4_1_pad=(1, 1), conv4_2_num_filters=196, conv4_2_filter_size=(3, 3), conv4_2_pad=(1, 1), conv4_3_num_filters=196, conv4_3_filter_size=(3, 3), conv4_3_pad=(1, 1), conv4_4_num_filters=196, conv4_4_filter_size=(3, 3), conv4_4_pad=(1, 1), pool4_pool_size=(2, 2), pool4_stride=(2, 2), conv1_1_W=Orthogonal(gain=1.0), conv1_2_W=Orthogonal(gain=1.0), conv2_1_W=Orthogonal(gain=1.0), conv2_2_W=Orthogonal(gain=1.0), conv3_1_W=Orthogonal(gain=1.0), conv3_2_W=Orthogonal(gain=1.0), conv3_3_W=Orthogonal(gain=1.0), conv4_1_W=Orthogonal(gain=1.0), conv4_2_W=Orthogonal(gain=1.0), conv4_3_W=Orthogonal(gain=1.0), conv4_4_W=Orthogonal(gain=1.0), dense1_num_units=4096, # drop1_p=0.5, dense2_num_units=2048, # drop2_p=0.5, dense3_num_units=512, # drop3_p=0.5, dense3_nonlinearity=lasagne.nonlinearities.sigmoid, # output layer uses identity function output_nonlinearity=None, output_num_units=4, # optimization method: update=nesterov_momentum, update_learning_rate=theano.shared(np.float32(0.001)), update_momentum=theano.shared(np.float32(0.9)), batch_iterator_train=custom_batch_iterator(batch_size=72), batch_iterator_test=custom_batch_iterator(batch_size=48), on_epoch_finished=[ AdjustVariable('update_learning_rate', start=0.001, stop=0.00005), AdjustVariable('update_momentum', start=0.9, stop=0.98) ], objective_loss_function=smooth_l1_loss, # objective_loss_function=iou_loss, custom_scores=[ # ('smoothl1', 
smooth_l1_loss_val), ('iou_loss', iou_loss_val), ('squared_error', mean_squared_error) ], regression=True, max_epochs=max_epochs, verbose=1, ) return net1 def config_4c_1234_3d_squaredloss_lr_linear(batch_iterator="BatchIterator", max_epochs=30): custom_batch_iterator = globals()[batch_iterator] net1 = NeuralNet( layers=[ ('input', layers.InputLayer), ('conv1_1', layers.Conv2DLayer), ('conv1_2', layers.Conv2DLayer), ('pool1', layers.MaxPool2DLayer), ('conv2_1', layers.Conv2DLayer), ('conv2_2', layers.Conv2DLayer), ('pool2', layers.MaxPool2DLayer), ('conv3_1', layers.Conv2DLayer), ('conv3_2', layers.Conv2DLayer), ('conv3_3', layers.Conv2DLayer), ('pool3', layers.MaxPool2DLayer), ('conv4_1', layers.Conv2DLayer), ('conv4_2', layers.Conv2DLayer), ('conv4_3', layers.Conv2DLayer), ('conv4_4', layers.Conv2DLayer), ('pool4', layers.MaxPool2DLayer), ('dense1', layers.DenseLayer), # ('drop1', layers.DropoutLayer), ('dense2', layers.DenseLayer), # ('drop2', layers.DropoutLayer), ('dense3', layers.DenseLayer), # ('drop3', layers.DropoutLayer), ('output', layers.DenseLayer), ], # layer parameters: input_shape=(None, 3, 256, 256), conv1_1_num_filters=86, conv1_1_filter_size=(5, 5), conv1_1_stride=(2, 2), conv1_2_num_filters=104, conv1_2_filter_size=(3, 3), conv1_2_pad=(1, 1), pool1_pool_size=(2, 2), pool1_stride=(2, 2), conv2_1_num_filters=128, conv2_1_filter_size=(3, 3), conv2_1_pad=(1, 1), conv2_2_num_filters=128, conv2_2_filter_size=(3, 3), conv2_2_pad=(1, 1), pool2_pool_size=(3, 3), pool2_stride=(2, 2), conv3_1_num_filters=256, conv3_1_filter_size=(3, 3), conv3_1_pad=(1, 1), conv3_2_num_filters=256, conv3_2_filter_size=(3, 3), conv3_2_pad=(1, 1), conv3_3_num_filters=256, conv3_3_filter_size=(3, 3), conv3_3_pad=(1, 1), pool3_pool_size=(3, 3), pool3_stride=(2, 2), conv4_1_num_filters=196, conv4_1_filter_size=(3, 3), conv4_1_pad=(1, 1), conv4_2_num_filters=196, conv4_2_filter_size=(3, 3), conv4_2_pad=(1, 1), conv4_3_num_filters=196, conv4_3_filter_size=(3, 3), conv4_3_pad=(1, 1), conv4_4_num_filters=196, conv4_4_filter_size=(3, 3), conv4_4_pad=(1, 1), pool4_pool_size=(2, 2), pool4_stride=(2, 2), conv1_1_W=Orthogonal(gain=1.0), conv1_2_W=Orthogonal(gain=1.0), conv2_1_W=Orthogonal(gain=1.0), conv2_2_W=Orthogonal(gain=1.0), conv3_1_W=Orthogonal(gain=1.0), conv3_2_W=Orthogonal(gain=1.0), conv3_3_W=Orthogonal(gain=1.0), conv4_1_W=Orthogonal(gain=1.0), conv4_2_W=Orthogonal(gain=1.0), conv4_3_W=Orthogonal(gain=1.0), conv4_4_W=Orthogonal(gain=1.0), dense1_num_units=4096, # drop1_p=0.5, dense2_num_units=2048, # drop2_p=0.5, dense3_num_units=512, # drop3_p=0.5, dense3_nonlinearity=lasagne.nonlinearities.sigmoid, # output layer uses identity function output_nonlinearity=None, output_num_units=4, # optimization method: update=nesterov_momentum, update_learning_rate=theano.shared(np.float32(0.001)), update_momentum=theano.shared(np.float32(0.9)), batch_iterator_train=custom_batch_iterator(batch_size=72), batch_iterator_test=custom_batch_iterator(batch_size=48), on_epoch_finished=[ AdjustVariable('update_learning_rate', start=0.001, stop=0.00005), AdjustVariable('update_momentum', start=0.9, stop=0.98) ], # objective_loss_function=smooth_l1_loss, # objective_loss_function=iou_loss, custom_scores=[ ('smoothl1', smooth_l1_loss_val), ('iou_loss', iou_loss_val), ('squared_error', mean_squared_error) ], regression=True, max_epochs=max_epochs, verbose=1, ) return net1 def config_4c_1233_3d(batch_iterator="BatchIterator", max_epochs=30): custom_batch_iterator = globals()[batch_iterator] net1 = NeuralNet( layers=[ ('input', 
layers.InputLayer), ('conv1', layers.Conv2DLayer), ('pool1', layers.MaxPool2DLayer), ('conv2_1', layers.Conv2DLayer), ('conv2_2', layers.Conv2DLayer), ('pool2', layers.MaxPool2DLayer), ('conv3_1', layers.Conv2DLayer), ('conv3_2', layers.Conv2DLayer), ('conv3_3', layers.Conv2DLayer), ('pool3', layers.MaxPool2DLayer), ('conv4_1', layers.Conv2DLayer), ('conv4_2', layers.Conv2DLayer), ('conv4_3', layers.Conv2DLayer), ('pool4', layers.MaxPool2DLayer), ('dense1', layers.DenseLayer), ('dense2', layers.DenseLayer), ('dense3', layers.DenseLayer), ('output', layers.DenseLayer), ], # layer parameters: input_shape=(None, 3, 256, 256), conv1_num_filters=86, conv1_filter_size=(5, 5), conv1_stride=(2, 2), conv1_pad=(1, 1), pool1_pool_size=(2, 2), conv2_1_num_filters=128, conv2_1_filter_size=(3, 3), conv2_1_pad=(1, 1), conv2_2_num_filters=128, conv2_2_filter_size=(3, 3), conv2_2_pad=(1, 1), pool2_pool_size=(2, 2), conv3_1_num_filters=256, conv3_1_filter_size=(3, 3), conv3_1_pad=(1, 1), conv3_2_num_filters=256, conv3_2_filter_size=(3, 3), conv3_2_pad=(1, 1), conv3_3_num_filters=256, conv3_3_filter_size=(3, 3), conv3_3_pad=(1, 1), pool3_pool_size=(2, 2), conv4_1_num_filters=196, conv4_1_filter_size=(3, 3), conv4_1_pad=(1, 1), conv4_2_num_filters=196, conv4_2_filter_size=(3, 3), conv4_2_pad=(1, 1), conv4_3_num_filters=196, conv4_3_filter_size=(3, 3), conv4_3_pad=(1, 1), pool4_pool_size=(2, 2), conv1_W=Orthogonal(gain=1.0), conv2_1_W=Orthogonal(gain=1.0), conv2_2_W=Orthogonal(gain=1.0), conv3_1_W=Orthogonal(gain=1.0), conv3_2_W=Orthogonal(gain=1.0), conv3_3_W=Orthogonal(gain=1.0), conv4_1_W=Orthogonal(gain=1.0), conv4_2_W=Orthogonal(gain=1.0), conv4_3_W=Orthogonal(gain=1.0), dense1_num_units=2048, dense2_num_units=1024, dense3_num_units=512, # dense1_nonlinearity=lasagne.nonlinearities.rectify, # dense2_nonlinearity=lasagne.nonlinearities.rectify, dense3_nonlinearity=lasagne.nonlinearities.sigmoid, dense1_W=Orthogonal(gain=1.0), dense2_W=Orthogonal(gain=1.0), dense3_W=Orthogonal(gain=1.0), # output layer uses identity function output_nonlinearity=None, output_num_units=4, # optimization method: update=nesterov_momentum, update_learning_rate=0.01, update_momentum=0.975, batch_iterator_train=custom_batch_iterator(batch_size=64), batch_iterator_test=custom_batch_iterator(batch_size=64), regression=True, max_epochs=max_epochs, verbose=1, ) return net1
apache-2.0
hfp/tensorflow-xsmm
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
46
13101
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Estimator input.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tempfile import numpy as np from tensorflow.python.training import training_util from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import models from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.training import input as input_lib from tensorflow.python.training import queue_runner_impl _BOSTON_INPUT_DIM = 13 _IRIS_INPUT_DIM = 4 def boston_input_fn(num_epochs=None): boston = base.load_boston() features = input_lib.limit_epochs( array_ops.reshape( constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]), num_epochs=num_epochs) labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1]) return features, labels def boston_input_fn_with_queue(num_epochs=None): features, labels = boston_input_fn(num_epochs=num_epochs) # Create a minimal queue runner. 
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32) queue_runner = queue_runner_impl.QueueRunner(fake_queue, [constant_op.constant(0)]) queue_runner_impl.add_queue_runner(queue_runner) return features, labels def iris_input_fn(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = array_ops.reshape(constant_op.constant(iris.target), [-1]) return features, labels def iris_input_fn_labels_dict(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = { 'labels': array_ops.reshape(constant_op.constant(iris.target), [-1]) } return features, labels def boston_eval_fn(): boston = base.load_boston() n_examples = len(boston.target) features = array_ops.reshape( constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM]) labels = array_ops.reshape( constant_op.constant(boston.target), [n_examples, 1]) return array_ops.concat([features, features], 0), array_ops.concat([labels, labels], 0) def extract(data, key): if isinstance(data, dict): assert key in data return data[key] else: return data def linear_model_params_fn(features, labels, mode, params): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=params['learning_rate']) return prediction, loss, train_op def linear_model_fn(features, labels, mode): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) if isinstance(features, dict): (_, features), = features.items() prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return prediction, loss, train_op def linear_model_fn_with_model_fn_ops(features, labels, mode): """Same as linear_model_fn, but returns `ModelFnOps`.""" assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return model_fn.ModelFnOps( mode=mode, predictions=prediction, loss=loss, train_op=train_op) def logistic_model_no_mode_fn(features, labels): features = extract(features, 'input') labels = extract(labels, 'labels') labels = array_ops.one_hot(labels, 3, 1, 0) prediction, loss = (models.logistic_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return { 'class': math_ops.argmax(prediction, 1), 'prob': prediction }, loss, train_op VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n' EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n' class EstimatorInputTest(test.TestCase): def testContinueTrainingDictionaryInput(self): boston = base.load_boston() output_dir = tempfile.mkdtemp() est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir) boston_input = {'input': boston.data} float64_target = {'labels': boston.target.astype(np.float64)} est.fit(x=boston_input, y=float64_target, steps=50) scores = est.evaluate( x=boston_input, 
y=float64_target, metrics={ 'MSE': metric_ops.streaming_mean_squared_error }) del est # Create another estimator object with the same output dir. est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir) # Check we can evaluate and predict. scores2 = est2.evaluate( x=boston_input, y=float64_target, metrics={ 'MSE': metric_ops.streaming_mean_squared_error }) self.assertAllClose(scores2['MSE'], scores['MSE']) predictions = np.array(list(est2.predict(x=boston_input))) other_score = _sklearn.mean_squared_error(predictions, float64_target['labels']) self.assertAllClose(other_score, scores['MSE']) def testBostonAll(self): boston = base.load_boston() est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn)) float64_labels = boston.target.astype(np.float64) est.fit(x=boston.data, y=float64_labels, steps=100) scores = est.score( x=boston.data, y=float64_labels, metrics={ 'MSE': metric_ops.streaming_mean_squared_error }) predictions = np.array(list(est.predict(x=boston.data))) other_score = _sklearn.mean_squared_error(predictions, boston.target) self.assertAllClose(scores['MSE'], other_score) self.assertTrue('global_step' in scores) self.assertEqual(100, scores['global_step']) def testBostonAllDictionaryInput(self): boston = base.load_boston() est = estimator.Estimator(model_fn=linear_model_fn) boston_input = {'input': boston.data} float64_target = {'labels': boston.target.astype(np.float64)} est.fit(x=boston_input, y=float64_target, steps=100) scores = est.evaluate( x=boston_input, y=float64_target, metrics={ 'MSE': metric_ops.streaming_mean_squared_error }) predictions = np.array(list(est.predict(x=boston_input))) other_score = _sklearn.mean_squared_error(predictions, boston.target) self.assertAllClose(other_score, scores['MSE']) self.assertTrue('global_step' in scores) self.assertEqual(scores['global_step'], 100) def testIrisAll(self): iris = base.load_iris() est = estimator.SKCompat( estimator.Estimator(model_fn=logistic_model_no_mode_fn)) est.fit(iris.data, iris.target, steps=100) scores = est.score( x=iris.data, y=iris.target, metrics={ ('accuracy', 'class'): metric_ops.streaming_accuracy }) predictions = est.predict(x=iris.data) predictions_class = est.predict(x=iris.data, outputs=['class'])['class'] self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0]) self.assertAllClose(predictions['class'], predictions_class) self.assertAllClose(predictions['class'], np.argmax(predictions['prob'], axis=1)) other_score = _sklearn.accuracy_score(iris.target, predictions['class']) self.assertAllClose(scores['accuracy'], other_score) self.assertTrue('global_step' in scores) self.assertEqual(100, scores['global_step']) def testIrisAllDictionaryInput(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) iris_data = {'input': iris.data} iris_target = {'labels': iris.target} est.fit(iris_data, iris_target, steps=100) scores = est.evaluate( x=iris_data, y=iris_target, metrics={ ('accuracy', 'class'): metric_ops.streaming_accuracy }) predictions = list(est.predict(x=iris_data)) predictions_class = list(est.predict(x=iris_data, outputs=['class'])) self.assertEqual(len(predictions), iris.target.shape[0]) classes_batch = np.array([p['class'] for p in predictions]) self.assertAllClose(classes_batch, np.array([p['class'] for p in predictions_class])) self.assertAllClose(classes_batch, np.argmax( np.array([p['prob'] for p in predictions]), axis=1)) other_score = _sklearn.accuracy_score(iris.target, classes_batch) 
self.assertAllClose(other_score, scores['accuracy']) self.assertTrue('global_step' in scores) self.assertEqual(scores['global_step'], 100) def testIrisInputFn(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) est.fit(input_fn=iris_input_fn, steps=100) _ = est.evaluate(input_fn=iris_input_fn, steps=1) predictions = list(est.predict(x=iris.data)) self.assertEqual(len(predictions), iris.target.shape[0]) def testIrisInputFnLabelsDict(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) est.fit(input_fn=iris_input_fn_labels_dict, steps=100) _ = est.evaluate( input_fn=iris_input_fn_labels_dict, steps=1, metrics={ 'accuracy': metric_spec.MetricSpec( metric_fn=metric_ops.streaming_accuracy, prediction_key='class', label_key='labels') }) predictions = list(est.predict(x=iris.data)) self.assertEqual(len(predictions), iris.target.shape[0]) def testTrainInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=1) _ = est.evaluate(input_fn=boston_eval_fn, steps=1) def testPredictInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) input_fn = functools.partial(boston_input_fn, num_epochs=1) output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0]) def testPredictInputFnWithQueue(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2) output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0] * 2) def testPredictConstInputFn(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) def input_fn(): features = array_ops.reshape( constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]) labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1]) return features, labels output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0]) if __name__ == '__main__': test.main()
apache-2.0
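Every model function above routes its inputs through the extract helper, which is what lets the same model_fn serve both the x=array and x={'input': array} call styles exercised in these tests. A short self-contained illustration of that contract, using plain numpy so no TensorFlow session is needed:

import numpy as np


def extract(data, key):
    # Same contract as the helper in the tests: dictionaries are unpacked by
    # key, anything else is passed through unchanged.
    if isinstance(data, dict):
        assert key in data
        return data[key]
    return data


X = np.random.rand(4, 13)
y = np.random.rand(4, 1)

# Both call styles reach the model_fn with identical arrays, which is what
# lets the tests run the same linear_model_fn on x=X and x={'input': X}.
assert np.array_equal(extract(X, 'input'), extract({'input': X}, 'input'))
assert np.array_equal(extract(y, 'labels'), extract({'labels': y}, 'labels'))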
sonnyhu/scikit-learn
examples/linear_model/plot_ols.py
104
1936
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.

The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)

# Code source: Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()

# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
bsd-3-clause
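The script computes the mean squared error by hand with np.mean; the same figures can be read off sklearn.metrics, which is arguably clearer. An optional variant of the metrics block, assuming regr, diabetes_X_test and diabetes_y_test from the script above are in scope:

# Optional variant of the metrics block above, using sklearn.metrics helpers
# instead of computing the squared error by hand. Assumes regr,
# diabetes_X_test and diabetes_y_test from the script are already defined.
from sklearn.metrics import mean_squared_error, r2_score

diabetes_y_pred = regr.predict(diabetes_X_test)
print("Mean squared error: %.2f"
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))
print("Variance score: %.2f" % r2_score(diabetes_y_test, diabetes_y_pred))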
caidongyun/Dato-Core
src/unity/python/graphlab/test/test_graph.py
13
15786
''' Copyright (C) 2015 Dato, Inc. All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the DATO-PYTHON-LICENSE file for details. ''' # from nose import with_setup from graphlab.data_structures.sgraph import SGraph, Vertex, Edge, load_graph from graphlab.data_structures.sframe import SFrame import pandas as pd from pandas.util.testing import assert_frame_equal import numpy as np import unittest import tempfile import util import json class GraphTests(unittest.TestCase): def setUp(self): self.vertices = pd.DataFrame({ 'vid': ['1', '2', '3'], 'color': ['g', None, 'b'], 'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]}) self.edges = pd.DataFrame({ 'src_id': ['1', '2', '3'], 'dst_id': ['2', '3', '4'], 'weight': [0., None, 1.]}) def test_empty_graph(self): g = SGraph() self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0}) self.assertEqual(len(g.get_fields()), 3) self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1)) self.assertTrue(g.get_edges(format='sframe').shape, (0, 2)) self.assertTrue(g.vertices.shape, (0, 1)) self.assertTrue(g.edges.shape, (0, 2)) self.assertTrue(len(g.get_vertices(format='list')) == 0) self.assertTrue(len(g.get_edges(format='list')) == 0) def test_graph_constructor(self): g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id') g2 = SGraph(g.vertices, g.edges) g3 = SGraph(g.vertices, g.edges, src_field="__dst_id", dst_field="__src_id") #flip around src and dst assert_frame_equal(g.vertices.to_dataframe().sort('__id').reset_index(drop=True), g2.vertices.to_dataframe().sort('__id').reset_index(drop=True)) assert_frame_equal(g.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True), g2.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True)) self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges))) self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id')) self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id')) def test_simple_graph(self): for input_type in [pd.DataFrame, SFrame, list]: g = SGraph() if input_type is list: vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()] edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()] g = g.add_vertices(vertices) g = g.add_edges(edges) else: g = g.add_vertices(input_type(self.vertices), vid_field='vid') g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id') self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3}) self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight']) self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec']) self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight']) self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3)) self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3)) self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2)) self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3)) self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec']) self.assertItemsEqual(g.get_edges(format='sframe').column_names(), 
['__src_id', '__dst_id', 'weight']) self.assertTrue(g.get_edges(format='sframe').shape, (3, 3)) self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3)) self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2)) self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3)) vertices = g.get_vertices(format='list') edges = g.get_edges(format='list') self.assertEqual(len(vertices), 4) self.assertEqual(len(edges), 3) # get edges is lazy edges = g.get_edges() self.assertFalse(edges.__is_materialized__()) def test_vertex_query(self): df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'], 'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']}) g = SGraph().add_edges(df, src_field='src', dst_field='dst') # basic check g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False) out = g2.get_edges(format='dataframe') out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True) out.index = range(len(out)) correct = pd.DataFrame.from_records([('b', 'd'), ('a', 'b'), ('c', 'b')], columns=['__src_id', '__dst_id']) correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True) correct.index = range(len(correct)) assert_frame_equal(out, correct, check_dtype=False) # check larger radius, full subgraph, and multiple vertices g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True) out = g2.get_edges(format='dataframe') out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True) out.index = range(len(out)) correct = pd.DataFrame.from_records([('a', 'b'), ('b', 'd'), ('c', 'b'), ('c', 'e'), ('d', 'c'), ('e', 'g'), ('f', 'e'), ('g', 'f')], columns=['__src_id', '__dst_id']) correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True) correct.index = range(len(correct)) assert_frame_equal(out, correct, check_dtype=False) def test_select_query(self): g = SGraph() g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id') g2 = g.select_fields(["color", "weight"]) self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight']) g2 = g.select_fields(["color"]) self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id']) del g.edges['weight'] del g.vertices['vec'] g.vertices['color2'] = g.vertices['color'] self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id']) g2 = g.select_fields([]) self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id']) def test_select_query_with_same_vertex_edge_field(self): vertices = SFrame({'__id': range(10)}) edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)}) g = SGraph(vertices, edges) g.vertices['weight'] = 0 g.vertices['v'] = 0 g.edges['weight'] = 0 g.edges['e'] = 0 self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id']) g2 = g.select_fields('weight') self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id']) def test_save_load(self): g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id') with util.TempDirectory() as f: g.save(f) g2 = load_graph(f, 'binary') self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3}) self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'}) with util.TempDirectory() as f: g.save(f, format='csv') vertices = SFrame.read_csv(f + "/vertices.csv") edges = SFrame.read_csv(f + "/edges.csv") g2 = SGraph().add_edges(edges, '__src_id', 
'__dst_id').add_vertices(vertices, '__id') self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3}) self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'}) with tempfile.NamedTemporaryFile(suffix='.json') as f: g.save(f.name) with open(f.name, 'r') as f2: data = f2.read() g2 = json.loads(data) self.assertTrue("vertices" in g2) self.assertTrue("edges" in g2) def test_load_graph_from_text(self): toy_graph_snap = """#some comment string #some more comment string 1\t2 1\t3 2\t3 2\t1 3\t1 3\t2""" toy_graph_tsv = """1\t2 1\t3 2\t3 2\t1 3\t1 3\t2""" toy_graph_csv = """1,2 1,3 2,3 2,1 3,1 3,2""" with tempfile.NamedTemporaryFile() as fsnap, tempfile.NamedTemporaryFile() as ftsv, tempfile.NamedTemporaryFile() as fcsv: fsnap.write(toy_graph_snap) fsnap.file.flush() ftsv.write(toy_graph_tsv) ftsv.file.flush() fcsv.write(toy_graph_csv) fcsv.file.flush() for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']): g = load_graph('remote://' + fname, fmt) self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6}) def test_robust_parse(self): df = pd.DataFrame({'int': [1, 2, 3], 'float': [1., 2., 3.], 'str': ['one', 'two', 'three'], 'nan': [np.nan, np.nan, np.nan], 'sparse_int': [1, 2, np.nan], 'sparse_float': [np.nan, 2., 3.], 'sparse_str': [None, 'two', None] }) g = SGraph().add_vertices(df) self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id']) df2 = g.get_vertices(format='dataframe') sf = g.get_vertices(format='sframe') for col in df.columns: # potential bug: df2 is missing the 'nan' column. if (col != 'nan'): self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna()))) self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna()))) def test_missing_value_vids(self): vertices = SFrame() vertices['vid'] = [1, 2, 3, None] edges = SFrame() edges['src'] = [1, 2, 3, None] edges['dst'] = [4, 4, 4, 4] self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary()) self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary()) self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary()) def test_gframe(self): g = SGraph() v = g.vertices self.assertSequenceEqual(v.column_names(), ['__id']) e = g.edges self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id']) # Test vertices and edge attributes cannot be modified def set_vertices_empty(g): g.vertices = SFrame() def set_edges_empty(g): g.edges = SFrame() def remove_vertices(g): del g.vertices def remove_edges(g): del g.edges def remove_edge_column(gf, name): del gf[name] self.assertRaises(AttributeError, lambda: remove_vertices(g)) self.assertRaises(AttributeError, lambda: remove_edges(g)) self.assertRaises(AttributeError, lambda: set_vertices_empty(g)) self.assertRaises(AttributeError, lambda: set_edges_empty(g)) # Test gframe operations has the same effect as its sframe+graph equivalent g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id') v = g.vertices v['id_col'] = v['__id'] e = g.edges e['src_id_col'] = e['__src_id'] e['dst_id_col'] = e['__dst_id'] g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id') new_vdata = g2.get_vertices() new_vdata['id_col'] = new_vdata['__id'] new_edata = g2.get_edges() new_edata['src_id_col'] = new_edata['__src_id'] new_edata['dst_id_col'] = new_edata['__dst_id'] g2 = 
SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id') assert_frame_equal(g.get_vertices().to_dataframe().sort('__id').reset_index(drop=True), g2.get_vertices().to_dataframe().sort('__id').reset_index(drop=True)) assert_frame_equal(g.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True), g2.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True)) # check delete a column with exception, and edges is still in a valid state self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn')) g.edges.head() # test slicing assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe()) assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe()) # test add row number e_expected = g.get_edges().to_dataframe() v_expected = g.get_vertices().to_dataframe() e_expected['id'] = range(len(e_expected)) v_expected['id'] = range(len(v_expected)) def test_sframe_le_append_skip_row_bug_is_fixed(self): """ This test is actually for SFrame lazy evaluation. The reason it is here is because the repro can only be done in SGraph. The bug appears when the SFrame has lazy_append and when passing through the logical filter, skip_rows is not done correctly. So the edge_sframe is in a bad state when not materialized. This unit test stays here to ensure the bug is fixed until we can find a more clean repro. """ n = 12 # smallest n to repro the le_append bug # A graph with edge i -> i + 1 g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst') lazy_sf = g.get_edges() materialized_sf = g.get_edges() materialized_sf.__materialize__() assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())
agpl-3.0
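A condensed sketch of the construction pattern these tests exercise, assuming the graphlab package built from this repository is importable; the vertex and edge frames mirror the fixtures defined in setUp:

# Minimal SGraph construction sketch mirroring the setUp() fixtures.
# Assumes the graphlab package from this repo is installed and importable.
import pandas as pd
from graphlab.data_structures.sgraph import SGraph

vertices = pd.DataFrame({'vid': ['1', '2', '3'],
                         'color': ['g', None, 'b']})
edges = pd.DataFrame({'src_id': ['1', '2', '3'],
                      'dst_id': ['2', '3', '4'],
                      'weight': [0., None, 1.]})

g = (SGraph()
     .add_vertices(vertices, vid_field='vid')
     .add_edges(edges, src_field='src_id', dst_field='dst_id'))

# Vertex '4' only appears as an edge destination, so it is added implicitly,
# which is why the tests expect 4 vertices and 3 edges.
print(g.summary())  # {'num_vertices': 4, 'num_edges': 3}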
AlexRobson/scikit-learn
sklearn/ensemble/tests/test_voting_classifier.py
40
6991
"""Testing for the boost module (sklearn.ensemble.boost).""" import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier from sklearn.grid_search import GridSearchCV from sklearn import datasets from sklearn import cross_validation from sklearn.datasets import make_multilabel_classification from sklearn.svm import SVC from sklearn.multiclass import OneVsRestClassifier # Load the iris dataset and randomly permute it iris = datasets.load_iris() X, y = iris.data[:, 1:3], iris.target def test_majority_label_iris(): """Check classification by majority label on dataset iris.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy') assert_almost_equal(scores.mean(), 0.95, decimal=2) def test_tie_situation(): """Check voting classifier selects smaller class label in tie situation.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)], voting='hard') assert_equal(clf1.fit(X, y).predict(X)[73], 2) assert_equal(clf2.fit(X, y).predict(X)[73], 1) assert_equal(eclf.fit(X, y).predict(X)[73], 1) def test_weights_iris(): """Check classification by average probabilities on dataset iris.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 2, 10]) scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy') assert_almost_equal(scores.mean(), 0.93, decimal=2) def test_predict_on_toy_problem(): """Manually check predicted class labels for toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]) y = np.array([1, 1, 1, 2, 2, 2]) assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard', weights=[1, 1, 1]) assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 1, 1]) assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) def test_predict_proba_on_toy_problem(): """Calculate predicted probabilities on toy dataset.""" clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) clf1_res = np.array([[0.59790391, 0.40209609], [0.57622162, 0.42377838], [0.50728456, 0.49271544], [0.40241774, 0.59758226]]) clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]]) clf3_res = np.array([[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0., 1.], [0., 1.]]) t00 = 
(2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4 t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4 t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4 t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4 eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[2, 1, 1]) eclf_res = eclf.fit(X, y).predict_proba(X) assert_almost_equal(t00, eclf_res[0][0], decimal=1) assert_almost_equal(t11, eclf_res[1][1], decimal=1) assert_almost_equal(t21, eclf_res[2][1], decimal=1) assert_almost_equal(t31, eclf_res[3][1], decimal=1) try: eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard') eclf.fit(X, y).predict_proba(X) except AttributeError: pass else: raise AssertionError('AttributeError for voting == "hard"' ' and with predict_proba not raised') def test_multilabel(): """Check if error is raised for multilabel classification.""" X, y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, return_indicator=True, random_state=123) clf = OneVsRestClassifier(SVC(kernel='linear')) eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard') try: eclf.fit(X, y) except NotImplementedError: return def test_gridsearch(): """Check GridSearch support.""" clf1 = LogisticRegression(random_state=1) clf2 = RandomForestClassifier(random_state=1) clf3 = GaussianNB() eclf = VotingClassifier(estimators=[ ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft') params = {'lr__C': [1.0, 100.0], 'voting': ['soft', 'hard'], 'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]} grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5) grid.fit(iris.data, iris.target)
bsd-3-clause
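test_predict_proba_on_toy_problem asserts that soft voting with weights [2, 1, 1] reproduces a weighted average of the per-classifier probabilities. The same arithmetic written out directly in numpy, reusing the hard-coded probability tables from the test:

# Worked version of the weighted soft-vote average checked in
# test_predict_proba_on_toy_problem: probabilities are averaged with
# weights [2, 1, 1], i.e. divided by the weight sum of 4.
import numpy as np

clf1_res = np.array([[0.59790391, 0.40209609],
                     [0.57622162, 0.42377838],
                     [0.50728456, 0.49271544],
                     [0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
                     [0.99845843, 0.00154157],
                     [0., 1.], [0., 1.]])

weights = np.array([2, 1, 1])
stacked = np.stack([clf1_res, clf2_res, clf3_res])       # shape (3, 4, 2)
averaged = np.average(stacked, axis=0, weights=weights)  # shape (4, 2)

# Matches e.g. t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
assert np.isclose(averaged[0, 0],
                  (2 * clf1_res[0, 0] + clf2_res[0, 0] + clf3_res[0, 0]) / 4)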
lscheinkman/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py
69
104273
""" Numerical python functions written for compatability with matlab(TM) commands with the same names. Matlab(TM) compatible functions ------------------------------- :func:`cohere` Coherence (normalized cross spectral density) :func:`csd` Cross spectral density uing Welch's average periodogram :func:`detrend` Remove the mean or best fit line from an array :func:`find` Return the indices where some condition is true; numpy.nonzero is similar but more general. :func:`griddata` interpolate irregularly distributed data to a regular grid. :func:`prctile` find the percentiles of a sequence :func:`prepca` Principal Component Analysis :func:`psd` Power spectral density uing Welch's average periodogram :func:`rk4` A 4th order runge kutta integrator for 1D or ND systems :func:`specgram` Spectrogram (power spectral density over segments of time) Miscellaneous functions ------------------------- Functions that don't exist in matlab(TM), but are useful anyway: :meth:`cohere_pairs` Coherence over all pairs. This is not a matlab function, but we compute coherence a lot in my lab, and we compute it for a lot of pairs. This function is optimized to do this efficiently by caching the direct FFTs. :meth:`rk4` A 4th order Runge-Kutta ODE integrator in case you ever find yourself stranded without scipy (and the far superior scipy.integrate tools) record array helper functions ------------------------------- A collection of helper methods for numpyrecord arrays .. _htmlonly:: See :ref:`misc-examples-index` :meth:`rec2txt` pretty print a record array :meth:`rec2csv` store record array in CSV file :meth:`csv2rec` import record array from CSV file with type inspection :meth:`rec_append_fields` adds field(s)/array(s) to record array :meth:`rec_drop_fields` drop fields from record array :meth:`rec_join` join two record arrays on sequence of fields :meth:`rec_groupby` summarize data by groups (similar to SQL GROUP BY) :meth:`rec_summarize` helper code to filter rec array fields into new fields For the rec viewer functions(e rec2csv), there are a bunch of Format objects you can pass into the functions that will do things like color negative values red, set percent formatting and scaling, etc. 
Example usage:: r = csv2rec('somefile.csv', checkrows=0) formatd = dict( weight = FormatFloat(2), change = FormatPercent(2), cost = FormatThousands(2), ) rec2excel(r, 'test.xls', formatd=formatd) rec2csv(r, 'test.csv', formatd=formatd) scroll = rec2gtk(r, formatd=formatd) win = gtk.Window() win.set_size_request(600,800) win.add(scroll) win.show_all() gtk.main() Deprecated functions --------------------- The following are deprecated; please import directly from numpy (with care--function signatures may differ): :meth:`conv` convolution (numpy.convolve) :meth:`corrcoef` The matrix of correlation coefficients :meth:`hist` Histogram (numpy.histogram) :meth:`linspace` Linear spaced array from min to max :meth:`load` load ASCII file - use numpy.loadtxt :meth:`meshgrid` Make a 2D grid from 2 1 arrays (numpy.meshgrid) :meth:`polyfit` least squares best polynomial fit of x to y (numpy.polyfit) :meth:`polyval` evaluate a vector for a vector of polynomial coeffs (numpy.polyval) :meth:`save` save ASCII file - use numpy.savetxt :meth:`trapz` trapeziodal integration (trapz(x,y) -> numpy.trapz(y,x)) :meth:`vander` the Vandermonde matrix (numpy.vander) """ from __future__ import division import csv, warnings, copy, os import numpy as np ma = np.ma from matplotlib import verbose import matplotlib.nxutils as nxutils import matplotlib.cbook as cbook # set is a new builtin function in 2.4; delete the following when # support for 2.3 is dropped. try: set except NameError: from sets import Set as set def linspace(*args, **kw): warnings.warn("use numpy.linspace", DeprecationWarning) return np.linspace(*args, **kw) def meshgrid(x,y): warnings.warn("use numpy.meshgrid", DeprecationWarning) return np.meshgrid(x,y) def mean(x, dim=None): warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning) if len(x)==0: return None return np.mean(x, axis=dim) def logspace(xmin,xmax,N): return np.exp(np.linspace(np.log(xmin), np.log(xmax), N)) def _norm(x): "return sqrt(x dot x)" return np.sqrt(np.dot(x,x)) def window_hanning(x): "return x times the hanning window of len(x)" return np.hanning(len(x))*x def window_none(x): "No window function; simply return x" return x #from numpy import convolve as conv def conv(x, y, mode=2): 'convolve x with y' warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning) return np.convolve(x,y,mode) def detrend(x, key=None): if key is None or key=='constant': return detrend_mean(x) elif key=='linear': return detrend_linear(x) def demean(x, axis=0): "Return x minus its mean along the specified axis" x = np.asarray(x) if axis: ind = [slice(None)] * axis ind.append(np.newaxis) return x - x.mean(axis)[ind] return x - x.mean(axis) def detrend_mean(x): "Return x minus the mean(x)" return x - x.mean() def detrend_none(x): "Return x: no detrending" return x def detrend_linear(y): "Return y minus best fit line; 'linear' detrending " # This is faster than an algorithm based on linalg.lstsq. x = np.arange(len(y), dtype=np.float_) C = np.cov(x, y, bias=1) b = C[0,1]/C[0,0] a = y.mean() - b*x.mean() return y - (b*x + a) #This is a helper function that implements the commonality between the #psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): #The checks for if y is x are so that we can use the same function to #implement the core of psd(), csd(), and spectrogram() without doing #extra calculations. 
We return the unaveraged Pxy, freqs, and t. same_data = y is x #Make sure we're dealing with a numpy array. If y and x were the same #object to start with, keep them that way x = np.asarray(x) if not same_data: y = np.asarray(y) # zero pad x and y up to NFFT if they are shorter than NFFT if len(x)<NFFT: n = len(x) x = np.resize(x, (NFFT,)) x[n:] = 0 if not same_data and len(y)<NFFT: n = len(y) y = np.resize(y, (NFFT,)) y[n:] = 0 if pad_to is None: pad_to = NFFT if scale_by_freq is None: warnings.warn("psd, csd, and specgram have changed to scale their " "densities by the sampling frequency for better MatLab " "compatibility. You can pass scale_by_freq=False to disable " "this behavior. Also, one-sided densities are scaled by a " "factor of 2.") scale_by_freq = True # For real x, ignore the negative frequencies unless told otherwise if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided': numFreqs = pad_to scaling_factor = 1. elif sides in ('default', 'onesided'): numFreqs = pad_to//2 + 1 scaling_factor = 2. else: raise ValueError("sides must be one of: 'default', 'onesided', or " "'twosided'") # Matlab divides by the sampling frequency so that density function # has units of dB/Hz and can be integrated by the plotted frequency # values. Perform the same scaling here. if scale_by_freq: scaling_factor /= Fs if cbook.iterable(window): assert(len(window) == NFFT) windowVals = window else: windowVals = window(np.ones((NFFT,), x.dtype)) step = NFFT - noverlap ind = np.arange(0, len(x) - NFFT + 1, step) n = len(ind) Pxy = np.zeros((numFreqs,n), np.complex_) # do the ffts of the slices for i in range(n): thisX = x[ind[i]:ind[i]+NFFT] thisX = windowVals * detrend(thisX) fx = np.fft.fft(thisX, n=pad_to) if same_data: fy = fx else: thisY = y[ind[i]:ind[i]+NFFT] thisY = windowVals * detrend(thisY) fy = np.fft.fft(thisY, n=pad_to) Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs] # Scale the spectrum by the norm of the window to compensate for # windowing loss; see Bendat & Piersol Sec 11.5.2. Also include # scaling factors for one-sided densities and dividing by the sampling # frequency, if desired. Pxy *= scaling_factor / (np.abs(windowVals)**2).sum() t = 1./Fs * (ind + NFFT / 2.) freqs = float(Fs) / pad_to * np.arange(numFreqs) return Pxy, freqs, t #Split out these keyword docs so that they can be used elsewhere kwdocd = dict() kwdocd['PSD'] =""" Keyword arguments: *NFFT*: integer The number of data points used in each block for the FFT. Must be even; a power 2 is most efficient. The default value is 256. *Fs*: scalar The sampling frequency (samples per time unit). It is used to calculate the Fourier frequencies, freqs, in cycles per time unit. The default value is 2. *detrend*: callable The function applied to each segment before fft-ing, designed to remove the mean or linear trend. Unlike in matlab, where the *detrend* parameter is a vector, in matplotlib is it a function. The :mod:`~matplotlib.pylab` module defines :func:`~matplotlib.pylab.detrend_none`, :func:`~matplotlib.pylab.detrend_mean`, and :func:`~matplotlib.pylab.detrend_linear`, but you can use a custom function as well. *window*: callable or ndarray A function or a vector of length *NFFT*. To create window vectors see :func:`window_hanning`, :func:`window_none`, :func:`numpy.blackman`, :func:`numpy.hamming`, :func:`numpy.bartlett`, :func:`scipy.signal`, :func:`scipy.signal.get_window`, etc. The default is :func:`window_hanning`. 
If a function is passed as the argument, it must take a data segment as an argument and return the windowed version of the segment. *noverlap*: integer The number of points of overlap between blocks. The default value is 0 (no overlap). *pad_to*: integer The number of points to which the data segment is padded when performing the FFT. This can be different from *NFFT*, which specifies the number of data points used. While not increasing the actual resolution of the psd (the minimum distance between resolvable peaks), this can give more points in the plot, allowing for more detail. This corresponds to the *n* parameter in the call to fft(). The default is None, which sets *pad_to* equal to *NFFT* *sides*: [ 'default' | 'onesided' | 'twosided' ] Specifies which sides of the PSD to return. Default gives the default behavior, which returns one-sided for real data and both for complex data. 'onesided' forces the return of a one-sided PSD, while 'twosided' forces two-sided. *scale_by_freq*: boolean Specifies whether the resulting density values should be scaled by the scaling frequency, which gives density in units of Hz^-1. This allows for integration over the returned frequency values. The default is True for MatLab compatibility. """ def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): """ The power spectral density by Welch's average periodogram method. The vector *x* is divided into *NFFT* length blocks. Each block is detrended by the function *detrend* and windowed by the function *window*. *noverlap* gives the length of the overlap between blocks. The absolute(fft(block))**2 of each segment are averaged to compute *Pxx*, with a scaling to correct for power loss due to windowing. If len(*x*) < *NFFT*, it will be zero padded to *NFFT*. *x* Array or sequence containing the data %(PSD)s Returns the tuple (*Pxx*, *freqs*). Refs: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) """ Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) return Pxx.real,freqs psd.__doc__ = psd.__doc__ % kwdocd def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): """ The cross power spectral density by Welch's average periodogram method. The vectors *x* and *y* are divided into *NFFT* length blocks. Each block is detrended by the function *detrend* and windowed by the function *window*. *noverlap* gives the length of the overlap between blocks. The product of the direct FFTs of *x* and *y* are averaged over each segment to compute *Pxy*, with a scaling to correct for power loss due to windowing. If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero padded to *NFFT*. *x*, *y* Array or sequence containing the data %(PSD)s Returns the tuple (*Pxy*, *freqs*). Refs: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) """ Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) if len(Pxy.shape) == 2 and Pxy.shape[1]>1: Pxy = Pxy.mean(axis=1) return Pxy, freqs csd.__doc__ = csd.__doc__ % kwdocd def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=128, pad_to=None, sides='default', scale_by_freq=None): """ Compute a spectrogram of data in *x*. Data are split into *NFFT* length segements and the PSD of each section is computed. 
The windowing function *window* is applied to each segment, and the amount of overlap of each segment is specified with *noverlap*. If *x* is real (i.e. non-complex) only the spectrum of the positive frequencie is returned. If *x* is complex then the complete spectrum is returned. %(PSD)s Returns a tuple (*Pxx*, *freqs*, *t*): - *Pxx*: 2-D array, columns are the periodograms of successive segments - *freqs*: 1-D array of frequencies corresponding to the rows in Pxx - *t*: 1-D array of times corresponding to midpoints of segments. .. seealso:: :func:`psd`: :func:`psd` differs in the default overlap; in returning the mean of the segment periodograms; and in not returning times. """ assert(NFFT > noverlap) Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pxx = Pxx.real #Needed since helper implements generically if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided': # center the frequency range at zero freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2])) Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0) return Pxx, freqs, t specgram.__doc__ = specgram.__doc__ % kwdocd _coh_error = """Coherence is calculated by averaging over *NFFT* length segments. Your signal is too short for your choice of *NFFT*. """ def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None): """ The coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} *x*, *y* Array or sequence containing the data %(PSD)s The return value is the tuple (*Cxy*, *f*), where *f* are the frequencies of the coherence vector. For cohere, scaling the individual densities by the sampling frequency has no effect, since the factors cancel out. .. seealso:: :func:`psd` and :func:`csd`: For information about the methods used to compute :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`. """ if len(x)<2*NFFT: raise ValueError(_coh_error) Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy) Cxy.shape = (len(f),) return Cxy, f cohere.__doc__ = cohere.__doc__ % kwdocd def corrcoef(*args): """ corrcoef(*X*) where *X* is a matrix returns a matrix of correlation coefficients for the columns of *X* corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of correlation coefficients for *x* and *y*. Numpy arrays can be real or complex. The correlation matrix is defined from the covariance matrix *C* as .. math:: r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}} """ warnings.warn("Use numpy.corrcoef", DeprecationWarning) kw = dict(rowvar=False) return np.corrcoef(*args, **kw) def polyfit(*args, **kwargs): u""" polyfit(*x*, *y*, *N*) Do a best fit polynomial of order *N* of *y* to *x*. Return value is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for *N*=2:: p2*x0^2 + p1*x0 + p0 = y1 p2*x1^2 + p1*x1 + p0 = y1 p2*x2^2 + p1*x2 + p0 = y2 ..... 
p2*xk^2 + p1*xk + p0 = yk Method: if *X* is a the Vandermonde Matrix computed from *x* (see `vandermonds <http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the polynomial least squares solution is given by the '*p*' in X*p = y where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix, *p* is a *N*+1 length vector, and *y* is a (len(*x*) \N{MULTIPLICATION SIGN} 1) vector. This equation can be solved as .. math:: p = (X_t X)^-1 X_t y where :math:`X_t` is the transpose of *X* and -1 denotes the inverse. Numerically, however, this is not a good method, so we use :func:`numpy.linalg.lstsq`. For more info, see `least squares fitting <http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_, but note that the *k*'s and *n*'s in the superscripts and subscripts on that page. The linear algebra is correct, however. .. seealso:: :func:`polyval` """ warnings.warn("use numpy.poyfit", DeprecationWarning) return np.polyfit(*args, **kwargs) def polyval(*args, **kwargs): """ *y* = polyval(*p*, *x*) *p* is a vector of polynomial coeffients and *y* is the polynomial evaluated at *x*. Example code to remove a polynomial (quadratic) trend from y:: p = polyfit(x, y, 2) trend = polyval(p, x) resid = y - trend .. seealso:: :func:`polyfit` """ warnings.warn("use numpy.polyval", DeprecationWarning) return np.polyval(*args, **kwargs) def vander(*args, **kwargs): """ *X* = vander(*x*, *N* = *None*) The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is *None* it defaults to len(*x*). """ warnings.warn("Use numpy.vander()", DeprecationWarning) return np.vander(*args, **kwargs) def donothing_callback(*args): pass def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, noverlap=0, preferSpeedOverMemory=True, progressCallback=donothing_callback, returnPxx=False): u""" Cxy, Phase, freqs = cohere_pairs(X, ij, ...) Compute the coherence for all pairs in *ij*. *X* is a (*numSamples*, *numCols*) numpy array. *ij* is a list of tuples (*i*, *j*). Each tuple is a pair of indexes into the columns of *X* for which you want to compute coherence. For example, if *X* has 64 columns, and you want to compute all nonredundant pairs, define *ij* as:: ij = [] for i in range(64): for j in range(i+1,64): ij.append( (i, j) ) The other function arguments, except for *preferSpeedOverMemory* (see below), are explained in the help string of :func:`psd`. Return value is a tuple (*Cxy*, *Phase*, *freqs*). - *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of dictionary keys is ``len(ij)``. - *Phase*: a dictionary of phases of the cross spectral density at each frequency for each pair. The keys are ``(i,j)``. - *freqs*: a vector of frequencies, equal in length to either the coherence or phase vectors for any (*i*, *j*) key.. Eg, to make a coherence Bode plot:: subplot(211) plot( freqs, Cxy[(12,19)]) subplot(212) plot( freqs, Phase[(12,19)]) For a large number of pairs, :func:`cohere_pairs` can be much more efficient than just calling :func:`cohere` for each pair, because it caches most of the intensive computations. If *N* is the number of pairs, this function is O(N) for most of the heavy lifting, whereas calling cohere for each pair is O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is also more memory intensive, making 2 additional complex arrays with approximately the same number of elements as *X*. 
The parameter *preferSpeedOverMemory*, if *False*, limits the caching by only making one, rather than two, complex cache arrays. This is useful if memory becomes critical. Even when *preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will still give significant performace gains over calling :func:`cohere` for each pair, and will use subtantially less memory than if *preferSpeedOverMemory* is *True*. In my tests with a (43000, 64) array over all non-redundant pairs, *preferSpeedOverMemory* = *True* delivered a 33% performace boost on a 1.7GHZ Athlon with 512MB RAM compared with *preferSpeedOverMemory* = *False*. But both solutions were more than 10x faster than naievly crunching all possible pairs through cohere. .. seealso:: :file:`test/cohere_pairs_test.py` in the src tree: For an example script that shows that this :func:`cohere_pairs` and :func:`cohere` give the same results for a given pair. """ numRows, numCols = X.shape # zero pad if X is too short if numRows < NFFT: tmp = X X = np.zeros( (NFFT, numCols), X.dtype) X[:numRows,:] = tmp del tmp numRows, numCols = X.shape # get all the columns of X that we are interested in by checking # the ij tuples seen = {} for i,j in ij: seen[i]=1; seen[j] = 1 allColumns = seen.keys() Ncols = len(allColumns) del seen # for real X, ignore the negative frequencies if np.iscomplexobj(X): numFreqs = NFFT else: numFreqs = NFFT//2+1 # cache the FFT of every windowed, detrended NFFT length segement # of every channel. If preferSpeedOverMemory, cache the conjugate # as well if cbook.iterable(window): assert(len(window) == NFFT) windowVals = window else: windowVals = window(np.ones((NFFT,), typecode(X))) ind = range(0, numRows-NFFT+1, NFFT-noverlap) numSlices = len(ind) FFTSlices = {} FFTConjSlices = {} Pxx = {} slices = range(numSlices) normVal = norm(windowVals)**2 for iCol in allColumns: progressCallback(i/Ncols, 'Cacheing FFTs') Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_) for iSlice in slices: thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol] thisSlice = windowVals*detrend(thisSlice) Slices[iSlice,:] = fft(thisSlice)[:numFreqs] FFTSlices[iCol] = Slices if preferSpeedOverMemory: FFTConjSlices[iCol] = conjugate(Slices) Pxx[iCol] = np.divide(np.mean(absolute(Slices)**2), normVal) del Slices, ind, windowVals # compute the coherences and phases for all pairs using the # cached FFTs Cxy = {} Phase = {} count = 0 N = len(ij) for i,j in ij: count +=1 if count%10==0: progressCallback(count/N, 'Computing coherences') if preferSpeedOverMemory: Pxy = FFTSlices[i] * FFTConjSlices[j] else: Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j]) if numSlices>1: Pxy = np.mean(Pxy) Pxy = np.divide(Pxy, normVal) Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j]) Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real) freqs = Fs/NFFT*np.arange(numFreqs) if returnPxx: return Cxy, Phase, freqs, Pxx else: return Cxy, Phase, freqs def entropy(y, bins): r""" Return the entropy of the data in *y*. .. math:: \sum p_i \log_2(p_i) where :math:`p_i` is the probability of observing *y* in the :math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a range of bins; see :func:`numpy.histogram`. 
Compare *S* with analytic calculation for a Gaussian:: x = mu + sigma * randn(200000) Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) ) """ n,bins = np.histogram(y, bins) n = n.astype(np.float_) n = np.take(n, np.nonzero(n)[0]) # get the positive p = np.divide(n, len(y)) delta = bins[1]-bins[0] S = -1.0*np.sum(p*log(p)) + log(delta) #S = -1.0*np.sum(p*log(p)) return S def hist(y, bins=10, normed=0): """ Return the histogram of *y* with *bins* equally sized bins. If bins is an array, use those bins. Return value is (*n*, *x*) where *n* is the count for each bin in *x*. If *normed* is *False*, return the counts in the first element of the returned tuple. If *normed* is *True*, return the probability density :math:`\\frac{n}{(len(y)\mathrm{dbin}}`. If *y* has rank > 1, it will be raveled. If *y* is masked, only the unmasked values will be used. Credits: the Numeric 22 documentation """ warnings.warn("Use numpy.histogram()", DeprecationWarning) return np.histogram(y, bins=bins, range=None, normed=normed) def normpdf(x, *args): "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*" mu, sigma = args return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2) def levypdf(x, gamma, alpha): "Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*" N = len(x) if N%2 != 0: raise ValueError, 'x must be an event length array; try\n' + \ 'x = np.linspace(minx, maxx, N), where N is even' dx = x[1]-x[0] f = 1/(N*dx)*np.arange(-N/2, N/2, np.float_) ind = np.concatenate([np.arange(N/2, N, int), np.arange(0, N/2, int)]) df = f[1]-f[0] cfl = exp(-gamma*np.absolute(2*pi*f)**alpha) px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_) return np.take(px, ind) def find(condition): "Return the indices where ravel(condition) is true" res, = np.nonzero(np.ravel(condition)) return res def trapz(x, y): """ Trapezoidal integral of *y*(*x*). """ warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning) return np.trapz(y, x) #if len(x)!=len(y): # raise ValueError, 'x and y must have the same length' #if len(x)<2: # raise ValueError, 'x and y must have > 1 element' #return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1])) def longest_contiguous_ones(x): """ Return the indices of the longest stretch of contiguous ones in *x*, assuming *x* is a vector of zeros and ones. If there are two equally long stretches, pick the first. """ x = np.ravel(x) if len(x)==0: return np.array([]) ind = (x==0).nonzero()[0] if len(ind)==0: return np.arange(len(x)) if len(ind)==len(x): return np.array([]) y = np.zeros( (len(x)+2,), x.dtype) y[1:-1] = x dif = np.diff(y) up = (dif == 1).nonzero()[0]; dn = (dif == -1).nonzero()[0]; i = (dn-up == max(dn - up)).nonzero()[0][0] ind = np.arange(up[i], dn[i]) return ind def longest_ones(x): '''alias for longest_contiguous_ones''' return longest_contiguous_ones(x) def prepca(P, frac=0): """ Compute the principal components of *P*. *P* is a (*numVars*, *numObs*) array. *frac* is the minimum fraction of variance that a component must contain to be included. Return value is a tuple of the form (*Pcomponents*, *Trans*, *fracVar*) where: - *Pcomponents* : a (numVars, numObs) array - *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* * *P* - *fracVar* : the fraction of the variance accounted for by each component returned A similar function of the same name was in the Matlab (TM) R13 Neural Network Toolbox but is not found in later versions; its successor seems to be called "processpcs". 
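A minimal usage sketch (the variable names and the 5% threshold are
illustrative only, not part of the function)::

    P = np.random.randn(10, 500)   # 10 variables, 500 observations
    # keep only components explaining at least 5% of the variance
    Pcomponents, Trans, fracVar = prepca(P, frac=0.05)
    # Pcomponents is the projection of the data, i.e. np.dot(Trans, P)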
""" U,s,v = np.linalg.svd(P) varEach = s**2/P.shape[1] totVar = varEach.sum() fracVar = varEach/totVar ind = slice((fracVar>=frac).sum()) # select the components that are greater Trans = U[:,ind].transpose() # The transformed data Pcomponents = np.dot(Trans,P) return Pcomponents, Trans, fracVar[ind] def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)): """ Return the percentiles of *x*. *p* can either be a sequence of percentile values or a scalar. If *p* is a sequence, the ith element of the return sequence is the *p*(i)-th percentile of *x*. If *p* is a scalar, the largest value of *x* less than or equal to the *p* percentage point in the sequence is returned. """ x = np.array(x).ravel() # we need a copy x.sort() Nx = len(x) if not cbook.iterable(p): return x[int(p*Nx/100.0)] p = np.asarray(p)* Nx/100.0 ind = p.astype(int) ind = np.where(ind>=Nx, Nx-1, ind) return x.take(ind) def prctile_rank(x, p): """ Return the rank for each element in *x*, return the rank 0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a len(*x*) array with values in [0,1,2,3] where 0 indicates the value is less than the 25th percentile, 1 indicates the value is >= the 25th and < 50th percentile, ... and 3 indicates the value is above the 75th percentile cutoff. *p* is either an array of percentiles in [0..100] or a scalar which indicates how many quantiles of data you want ranked. """ if not cbook.iterable(p): p = np.arange(100.0/p, 100.0, 100.0/p) else: p = np.asarray(p) if p.max()<=1 or p.min()<0 or p.max()>100: raise ValueError('percentiles should be in range 0..100, not 0..1') ptiles = prctile(x, p) return np.searchsorted(ptiles, x) def center_matrix(M, dim=0): """ Return the matrix *M* with each row having zero mean and unit std. If *dim* = 1 operate on columns instead of rows. (*dim* is opposite to the numpy axis kwarg.) """ M = np.asarray(M, np.float_) if dim: M = (M - M.mean(axis=0)) / M.std(axis=0) else: M = (M - M.mean(axis=1)[:,np.newaxis]) M = M / M.std(axis=1)[:,np.newaxis] return M def rk4(derivs, y0, t): """ Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta. This is a toy implementation which may be useful if you find yourself stranded on a system w/o scipy. Otherwise use :func:`scipy.integrate`. *y0* initial state vector *t* sample times *derivs* returns the derivative of the system and has the signature ``dy = derivs(yi, ti)`` Example 1 :: ## 2D system def derivs6(x,t): d1 = x[0] + 2*x[1] d2 = -3*x[0] + 4*x[1] return (d1, d2) dt = 0.0005 t = arange(0.0, 2.0, dt) y0 = (1,2) yout = rk4(derivs6, y0, t) Example 2:: ## 1D system alpha = 2 def derivs(x,t): return -alpha*x + exp(-t) y0 = 1 yout = rk4(derivs, y0, t) If you have access to scipy, you should probably be using the scipy.integrate tools rather than this function. """ try: Ny = len(y0) except TypeError: yout = np.zeros( (len(t),), np.float_) else: yout = np.zeros( (len(t), Ny), np.float_) yout[0] = y0 i = 0 for i in np.arange(len(t)-1): thist = t[i] dt = t[i+1] - thist dt2 = dt/2.0 y0 = yout[i] k1 = np.asarray(derivs(y0, thist)) k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2)) k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2)) k4 = np.asarray(derivs(y0 + dt*k3, thist+dt)) yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4) return yout def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0): """ Bivariate Gaussian distribution for equal shape *X*, *Y*. See `bivariate normal <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_ at mathworld. 
""" Xmu = X-mux Ymu = Y-muy rho = sigmaxy/(sigmax*sigmay) z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay) denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2) return np.exp( -z/(2*(1-rho**2))) / denom def get_xyz_where(Z, Cond): """ *Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is a boolean matrix where some condition is satisfied. Return value is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and *z* are the values of *Z* at those indices. *x*, *y*, and *z* are 1D arrays. """ X,Y = np.indices(Z.shape) return X[Cond], Y[Cond], Z[Cond] def get_sparse_matrix(M,N,frac=0.1): """ Return a *M* x *N* sparse matrix with *frac* elements randomly filled. """ data = np.zeros((M,N))*0. for i in range(int(M*N*frac)): x = np.random.randint(0,M-1) y = np.random.randint(0,N-1) data[x,y] = np.random.rand() return data def dist(x,y): """ Return the distance between two points. """ d = x-y return np.sqrt(np.dot(d,d)) def dist_point_to_segment(p, s0, s1): """ Get the distance of a point to a segment. *p*, *s0*, *s1* are *xy* sequences This algorithm from http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment """ p = np.asarray(p, np.float_) s0 = np.asarray(s0, np.float_) s1 = np.asarray(s1, np.float_) v = s1 - s0 w = p - s0 c1 = np.dot(w,v); if ( c1 <= 0 ): return dist(p, s0); c2 = np.dot(v,v) if ( c2 <= c1 ): return dist(p, s1); b = c1 / c2 pb = s0 + b * v; return dist(p, pb) def segments_intersect(s1, s2): """ Return *True* if *s1* and *s2* intersect. *s1* and *s2* are defined as:: s1: (x1, y1), (x2, y2) s2: (x3, y3), (x4, y4) """ (x1, y1), (x2, y2) = s1 (x3, y3), (x4, y4) = s2 den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1)) n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3)) n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3)) if den == 0: # lines parallel return False u1 = n1/den u2 = n2/den return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0 def fftsurr(x, detrend=detrend_none, window=window_none): """ Compute an FFT phase randomized surrogate of *x*. """ if cbook.iterable(window): x=window*detrend(x) else: x = window(detrend(x)) z = np.fft.fft(x) a = 2.*np.pi*1j phase = a * np.random.rand(len(x)) z = z*np.exp(phase) return np.fft.ifft(z).real def liaupunov(x, fprime): """ *x* is a very long trajectory from a map, and *fprime* returns the derivative of *x*. Returns : .. math:: \lambda = \\frac{1}{n}\\sum \\ln|f^'(x_i)| .. seealso:: Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos". `Wikipedia article on Lyapunov Exponent <http://en.wikipedia.org/wiki/Lyapunov_exponent>`_. .. note:: What the function here calculates may not be what you really want; *caveat emptor*. It also seems that this function's name is badly misspelled. """ return np.mean(np.log(np.absolute(fprime(x)))) class FIFOBuffer: """ A FIFO queue to hold incoming *x*, *y* data in a rotating buffer using numpy arrays under the hood. It is assumed that you will call asarrays much less frequently than you add data to the queue -- otherwise another data structure will be faster. This can be used to support plots where data is added from a real time feed and the plot object wants to grab data from the buffer and plot it to screen less freqeuently than the incoming. If you set the *dataLim* attr to :class:`~matplotlib.transforms.BBox` (eg :attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as new data come in. TODO: add a grow method that will extend nmax .. note:: mlab seems like the wrong place for this class. 
""" def __init__(self, nmax): """ Buffer up to *nmax* points. """ self._xa = np.zeros((nmax,), np.float_) self._ya = np.zeros((nmax,), np.float_) self._xs = np.zeros((nmax,), np.float_) self._ys = np.zeros((nmax,), np.float_) self._ind = 0 self._nmax = nmax self.dataLim = None self.callbackd = {} def register(self, func, N): """ Call *func* every time *N* events are passed; *func* signature is ``func(fifo)``. """ self.callbackd.setdefault(N, []).append(func) def add(self, x, y): """ Add scalar *x* and *y* to the queue. """ if self.dataLim is not None: xys = ((x,y),) self.dataLim.update(xys, -1) #-1 means use the default ignore setting ind = self._ind % self._nmax #print 'adding to fifo:', ind, x, y self._xs[ind] = x self._ys[ind] = y for N,funcs in self.callbackd.items(): if (self._ind%N)==0: for func in funcs: func(self) self._ind += 1 def last(self): """ Get the last *x*, *y* or *None*. *None* if no data set. """ if self._ind==0: return None, None ind = (self._ind-1) % self._nmax return self._xs[ind], self._ys[ind] def asarrays(self): """ Return *x* and *y* as arrays; their length will be the len of data added or *nmax*. """ if self._ind<self._nmax: return self._xs[:self._ind], self._ys[:self._ind] ind = self._ind % self._nmax self._xa[:self._nmax-ind] = self._xs[ind:] self._xa[self._nmax-ind:] = self._xs[:ind] self._ya[:self._nmax-ind] = self._ys[ind:] self._ya[self._nmax-ind:] = self._ys[:ind] return self._xa, self._ya def update_datalim_to_current(self): """ Update the *datalim* in the current data in the fifo. """ if self.dataLim is None: raise ValueError('You must first set the dataLim attr') x, y = self.asarrays() self.dataLim.update_numerix(x, y, True) def movavg(x,n): """ Compute the len(*n*) moving average of *x*. """ w = np.empty((n,), dtype=np.float_) w[:] = 1.0/n return np.convolve(x, w, mode='valid') def save(fname, X, fmt='%.18e',delimiter=' '): """ Save the data in *X* to file *fname* using *fmt* string to convert the data to strings. *fname* can be a filename or a file handle. If the filename ends in '.gz', the file is automatically saved in compressed gzip format. The :func:`load` function understands gzipped files transparently. Example usage:: save('test.out', X) # X is an array save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays save('test2.out', x) # x is 1D save('test3.out', x, fmt='%1.4e') # use exponential notation *delimiter* is used to separate the fields, eg. *delimiter* ',' for comma-separated values. """ if cbook.is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname,'wb') else: fh = file(fname,'w') elif hasattr(fname, 'seek'): fh = fname else: raise ValueError('fname must be a string or file handle') X = np.asarray(X) origShape = None if X.ndim == 1: origShape = X.shape X.shape = len(X), 1 for row in X: fh.write(delimiter.join([fmt%val for val in row]) + '\n') if origShape is not None: X.shape = origShape def load(fname,comments='#',delimiter=None, converters=None,skiprows=0, usecols=None, unpack=False, dtype=np.float_): """ Load ASCII data from *fname* into an array and return the array. The data must be regular, same number of values in every row *fname* can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in '.gz'. matfile data is not supported; for that, use :mod:`scipy.io.mio` module. 
Example usage:: X = load('test.dat') # data in two columns t = X[:,0] y = X[:,1] Alternatively, you can do the same with "unpack"; see below:: X = load('test.dat') # a matrix of data x = load('test.dat') # a single column of data - *comments*: the character used to indicate the start of a comment in the file - *delimiter* is a string-like character used to seperate values in the file. If *delimiter* is unspecified or *None*, any whitespace string is a separator. - *converters*, if not *None*, is a dictionary mapping column number to a function that will convert that column to a float (or the optional *dtype* if specified). Eg, if column 0 is a date string:: converters = {0:datestr2num} - *skiprows* is the number of rows from the top to skip. - *usecols*, if not *None*, is a sequence of integer column indexes to extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract just the 2nd, 5th and 6th columns - *unpack*, if *True*, will transpose the matrix allowing you to unpack into named arguments on the left hand side:: t,y = load('test.dat', unpack=True) # for two column data x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True) - *dtype*: the array will have this dtype. default: ``numpy.float_`` .. seealso:: See :file:`examples/pylab_examples/load_converter.py` in the source tree: Exercises many of these options. """ if converters is None: converters = {} fh = cbook.to_filehandle(fname) X = [] if delimiter==' ': # space splitting is a special case since x.split() is what # you want, not x.split(' ') def splitfunc(x): return x.split() else: def splitfunc(x): return x.split(delimiter) converterseq = None for i,line in enumerate(fh): if i<skiprows: continue line = line.split(comments, 1)[0].strip() if not len(line): continue if converterseq is None: converterseq = [converters.get(j,float) for j,val in enumerate(splitfunc(line))] if usecols is not None: vals = splitfunc(line) row = [converterseq[j](vals[j]) for j in usecols] else: row = [converterseq[j](val) for j,val in enumerate(splitfunc(line))] thisLen = len(row) X.append(row) X = np.array(X, dtype) r,c = X.shape if r==1 or c==1: X.shape = max(r,c), if unpack: return X.transpose() else: return X def slopes(x,y): """ SLOPES calculate the slope y'(x) Given data vectors X and Y SLOPES calculates Y'(X), i.e the slope of a curve Y(X). The slope is estimated using the slope obtained from that of a parabola through any three consecutive points. This method should be superior to that described in the appendix of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel W. Stineman (Creative Computing July 1980) in at least one aspect: Circles for interpolation demand a known aspect ratio between x- and y-values. For many functions, however, the abscissa are given in different dimensions, so an aspect ratio is completely arbitrary. The parabola method gives very similar results to the circle method for most regular cases but behaves much better in special cases Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de (inspired by a original implementation by Halldor Bjornsson, Icelandic Meteorological Office, March 2006 halldor at vedur.is) """ # Cast key variables as float. 
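    # How the estimate below works: for each interior point the slope is
    # a weighted average of the finite-difference slopes of the two
    # neighbouring intervals, each weighted by the width of the *other*
    # interval.  This equals the derivative at x[i] of the parabola
    # through (x[i-1], y[i-1]), (x[i], y[i]) and (x[i+1], y[i+1]), i.e.
    # the parabola method referred to in the docstring.  The endpoint
    # slopes come from the same parabolas evaluated at the first and
    # last points.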
x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) yp=np.zeros(y.shape, np.float_) dx=x[1:] - x[:-1] dy=y[1:] - y[:-1] dydx = dy/dx yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1]) yp[0] = 2.0 * dy[0]/dx[0] - yp[1] yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2] return yp def stineman_interp(xi,x,y,yp=None): """ STINEMAN_INTERP Well behaved data interpolation. Given data vectors X and Y, the slope vector YP and a new abscissa vector XI the function stineman_interp(xi,x,y,yp) uses Stineman interpolation to calculate a vector YI corresponding to XI. Here's an example that generates a coarse sine curve, then interpolates over a finer abscissa: x = linspace(0,2*pi,20); y = sin(x); yp = cos(x) xi = linspace(0,2*pi,40); yi = stineman_interp(xi,x,y,yp); plot(x,y,'o',xi,yi) The interpolation method is described in the article A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell W. Stineman. The article appeared in the July 1980 issue of Creative Computing with a note from the editor stating that while they were not an academic journal but once in a while something serious and original comes in adding that this was "apparently a real solution" to a well known problem. For yp=None, the routine automatically determines the slopes using the "slopes" routine. X is assumed to be sorted in increasing order For values xi[j] < x[0] or xi[j] > x[-1], the routine tries a extrapolation. The relevance of the data obtained from this, of course, questionable... original implementation by Halldor Bjornsson, Icelandic Meteorolocial Office, March 2006 halldor at vedur.is completely reworked and optimized for Python by Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de """ # Cast key variables as float. x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) assert x.shape == y.shape N=len(y) if yp is None: yp = slopes(x,y) else: yp=np.asarray(yp, np.float_) xi=np.asarray(xi, np.float_) yi=np.zeros(xi.shape, np.float_) # calculate linear slopes dx = x[1:] - x[:-1] dy = y[1:] - y[:-1] s = dy/dx #note length of s is N-1 so last element is #N-2 # find the segment each xi is in # this line actually is the key to the efficiency of this implementation idx = np.searchsorted(x[1:-1], xi) # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1] # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1] # the y-values that would come out from a linear interpolation: sidx = s.take(idx) xidx = x.take(idx) yidx = y.take(idx) xidxp1 = x.take(idx+1) yo = yidx + sidx * (xi - xidx) # the difference that comes when using the slopes given in yp dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point dy1dy2 = dy1*dy2 # The following is optimized for Python. 
The solution actually # does more calculations than necessary but exploiting the power # of numpy, this is far more efficient than coding a loop by hand # in Python yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1, ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)), 0.0, 1/(dy1+dy2),)) return yi def inside_poly(points, verts): """ points is a sequence of x,y points verts is a sequence of x,y vertices of a poygon return value is a sequence of indices into points for the points that are inside the polygon """ res, = np.nonzero(nxutils.points_inside_poly(points, verts)) return res def poly_below(ymin, xs, ys): """ given a arrays *xs* and *ys*, return the vertices of a polygon that has a scalar lower bound *ymin* and an upper bound at the *ys*. intended for use with Axes.fill, eg:: xv, yv = poly_below(0, x, y) ax.fill(xv, yv) """ return poly_between(xs, ys, xmin) def poly_between(x, ylower, yupper): """ given a sequence of x, ylower and yupper, return the polygon that fills the regions between them. ylower or yupper can be scalar or iterable. If they are iterable, they must be equal in length to x return value is x, y arrays for use with Axes.fill """ Nx = len(x) if not cbook.iterable(ylower): ylower = ylower*np.ones(Nx) if not cbook.iterable(yupper): yupper = yupper*np.ones(Nx) x = np.concatenate( (x, x[::-1]) ) y = np.concatenate( (yupper, ylower[::-1]) ) return x,y ### the following code was written and submitted by Fernando Perez ### from the ipython numutils package under a BSD license # begin fperez functions """ A set of convenient utilities for numerical work. Most of this module requires numpy or is meant to be used with it. Copyright (c) 2001-2004, Fernando Perez. <[email protected]> All rights reserved. This license was generated from the BSD license template as found in: http://www.opensource.org/licenses/bsd-license.php Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the IPython project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" import operator import math #***************************************************************************** # Globals #**************************************************************************** # function definitions exp_safe_MIN = math.log(2.2250738585072014e-308) exp_safe_MAX = 1.7976931348623157e+308 def exp_safe(x): """ Compute exponentials which safely underflow to zero. Slow, but convenient to use. Note that numpy provides proper floating point exception handling with access to the underlying hardware. """ if type(x) is np.ndarray: return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX)) else: return math.exp(x) def amap(fn,*args): """ amap(function, sequence[, sequence, ...]) -> array. Works like :func:`map`, but it returns an array. This is just a convenient shorthand for ``numpy.array(map(...))``. """ return np.array(map(fn,*args)) #from numpy import zeros_like def zeros_like(a): """ Return an array of zeros of the shape and typecode of *a*. """ warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning) return np.zeros_like(a) #from numpy import sum as sum_flat def sum_flat(a): """ Return the sum of all the elements of *a*, flattened out. It uses ``a.flat``, and if *a* is not contiguous, a call to ``ravel(a)`` is made. """ warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning) return np.sum(a) #from numpy import mean as mean_flat def mean_flat(a): """ Return the mean of all the elements of *a*, flattened out. """ warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning) return np.mean(a) def rms_flat(a): """ Return the root mean square of all the elements of *a*, flattened out. """ return np.sqrt(np.mean(np.absolute(a)**2)) def l1norm(a): """ Return the *l1* norm of *a*, flattened out. Implemented as a separate function (not a call to :func:`norm` for speed). """ return np.sum(np.absolute(a)) def l2norm(a): """ Return the *l2* norm of *a*, flattened out. Implemented as a separate function (not a call to :func:`norm` for speed). """ return np.sqrt(np.sum(np.absolute(a)**2)) def norm_flat(a,p=2): """ norm(a,p=2) -> l-p norm of a.flat Return the l-p norm of *a*, considered as a flat array. This is NOT a true matrix norm, since arrays of arbitrary rank are always flattened. *p* can be a number or the string 'Infinity' to get the L-infinity norm. """ # This function was being masked by a more general norm later in # the file. We may want to simply delete it. if p=='Infinity': return np.amax(np.absolute(a)) else: return (np.sum(np.absolute(a)**p))**(1.0/p) def frange(xini,xfin=None,delta=None,**kw): """ frange([start,] stop[, step, keywords]) -> array of floats Return a numpy ndarray containing a progression of floats. Similar to :func:`numpy.arange`, but defaults to a closed interval. ``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start* defaults to 0, and the endpoint *is included*. This behavior is different from that of :func:`range` and :func:`numpy.arange`. This is deliberate, since :func:`frange` will probably be more useful for generating lists of points for function evaluation, and endpoints are often desired in this use. The usual behavior of :func:`range` can be obtained by setting the keyword *closed* = 0, in this case, :func:`frange` basically becomes :func:numpy.arange`. When *step* is given, it specifies the increment (or decrement). All arguments can be floating point numbers. ``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where *xfin* <= *x1*. :func:`frange` can also be called with the keyword *npts*. 
This sets the number of points the list should contain (and overrides the value *step* might have been given). :func:`numpy.arange` doesn't offer this option. Examples:: >>> frange(3) array([ 0., 1., 2., 3.]) >>> frange(3,closed=0) array([ 0., 1., 2.]) >>> frange(1,6,2) array([1, 3, 5]) or 1,3,5,7, depending on floating point vagueries >>> frange(1,6.5,npts=5) array([ 1. , 2.375, 3.75 , 5.125, 6.5 ]) """ #defaults kw.setdefault('closed',1) endpoint = kw['closed'] != 0 # funny logic to allow the *first* argument to be optional (like range()) # This was modified with a simpler version from a similar frange() found # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472 if xfin == None: xfin = xini + 0.0 xini = 0.0 if delta == None: delta = 1.0 # compute # of points, spacing and return final list try: npts=kw['npts'] delta=(xfin-xini)/float(npts-endpoint) except KeyError: npts = int(round((xfin-xini)/delta)) + endpoint #npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint # round finds the nearest, so the endpoint can be up to # delta/2 larger than xfin. return np.arange(npts)*delta+xini # end frange() #import numpy.diag as diagonal_matrix def diagonal_matrix(diag): """ Return square diagonal matrix whose non-zero elements are given by the input array. """ warnings.warn("Use numpy.diag(d)", DeprecationWarning) return np.diag(diag) def identity(n, rank=2, dtype='l', typecode=None): """ Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*). For ranks higher than 2, this object is simply a multi-index Kronecker delta:: / 1 if i0=i1=...=iR, id[i0,i1,...,iR] = -| \ 0 otherwise. Optionally a *dtype* (or typecode) may be given (it defaults to 'l'). Since rank defaults to 2, this function behaves in the default case (when only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is much faster. """ if typecode is not None: warnings.warn("Use dtype kwarg instead of typecode", DeprecationWarning) dtype = typecode iden = np.zeros((n,)*rank, dtype) for i in range(n): idx = (i,)*rank iden[idx] = 1 return iden def base_repr (number, base = 2, padding = 0): """ Return the representation of a *number* in any given *base*. """ chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' if number < base: \ return (padding - 1) * chars [0] + chars [int (number)] max_exponent = int (math.log (number)/math.log (base)) max_power = long (base) ** max_exponent lead_digit = int (number/max_power) return chars [lead_digit] + \ base_repr (number - max_power * lead_digit, base, \ max (padding - 1, max_exponent)) def binary_repr(number, max_length = 1025): """ Return the binary representation of the input *number* as a string. This is more efficient than using :func:`base_repr` with base 2. Increase the value of max_length for very large numbers. Note that on 32-bit machines, 2**1023 is the largest integer power of 2 which can be converted to a Python float. """ #assert number < 2L << max_length shifts = map (operator.rshift, max_length * [number], \ range (max_length - 1, -1, -1)) digits = map (operator.mod, shifts, max_length * [2]) if not digits.count (1): return 0 digits = digits [digits.index (1):] return ''.join (map (repr, digits)).replace('L','') def log2(x,ln2 = math.log(2.0)): """ Return the log(*x*) in base 2. This is a _slow_ function but which is guaranteed to return the correct integer value if the input is an integer exact power of 2. 
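For example (a minimal sketch; return values shown as comments)::

    log2(1024)   # -> 10, exact, since 1024 == 2**10
    log2(1000)   # -> roughly 9.966, computed as log(1000)/log(2)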
""" try: bin_n = binary_repr(x)[1:] except (AssertionError,TypeError): return math.log(x)/ln2 else: if '1' in bin_n: return math.log(x)/ln2 else: return len(bin_n) def ispower2(n): """ Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise. Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly. """ bin_n = binary_repr(n)[1:] if '1' in bin_n: return 0 else: return len(bin_n) def isvector(X): """ Like the Matlab (TM) function with the same name, returns *True* if the supplied numpy array or matrix *X* looks like a vector, meaning it has a one non-singleton axis (i.e., it can have multiple axes, but all must have length 1, except for one of them). If you just want to see if the array has 1 axis, use X.ndim == 1. """ return np.prod(X.shape)==np.max(X.shape) #from numpy import fromfunction as fromfunction_kw def fromfunction_kw(function, dimensions, **kwargs): """ Drop-in replacement for :func:`numpy.fromfunction`. Allows passing keyword arguments to the desired function. Call it as (keywords are optional):: fromfunction_kw(MyFunction, dimensions, keywords) The function ``MyFunction`` is responsible for handling the dictionary of keywords it will receive. """ warnings.warn("Use numpy.fromfunction()", DeprecationWarning) return np.fromfunction(function, dimensions, **kwargs) ### end fperez numutils code def rem(x,y): """ Deprecated - see :func:`numpy.remainder` """ raise NotImplementedError('Deprecated - see numpy.remainder') def norm(x,y=2): """ Deprecated - see :func:`numpy.linalg.norm` """ raise NotImplementedError('Deprecated - see numpy.linalg.norm') def orth(A): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - needs clean room implementation') def rank(x): """ Deprecated - see :func:`numpy.rank` """ raise NotImplementedError('Deprecated - see numpy.rank') def sqrtm(x): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm') def mfuncC(f, x): """ Deprecated """ raise NotImplementedError('Deprecated - needs clean room implementation') def approx_real(x): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - needs clean room implementation') #helpers for loading, saving, manipulating and viewing numpy record arrays def safe_isnan(x): ':func:`numpy.isnan` for arbitrary types' if cbook.is_string_like(x): return False try: b = np.isnan(x) except NotImplementedError: return False except TypeError: return False else: return b def safe_isinf(x): ':func:`numpy.isinf` for arbitrary types' if cbook.is_string_like(x): return False try: b = np.isinf(x) except NotImplementedError: return False except TypeError: return False else: return b def rec_view(rec): """ Return a view of an ndarray as a recarray .. seealso:: http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html """ return rec.view(np.recarray) #return rec.view(dtype=(np.record, rec.dtype), type=np.recarray) def rec_append_field(rec, name, arr, dtype=None): """ Return a new record array with field name populated with data from array *arr*. This function is Deprecated. Please use :func:`rec_append_fields`. """ warnings.warn("use rec_append_fields", DeprecationWarning) return rec_append_fields(rec, name, arr, dtype) def rec_append_fields(rec, names, arrs, dtypes=None): """ Return a new record array with field names populated with data from arrays in *arrs*. If appending a single field, then *names*, *arrs* and *dtypes* do not have to be lists. 
They can just be the values themselves. """ if (not cbook.is_string_like(names) and cbook.iterable(names) \ and len(names) and cbook.is_string_like(names[0])): if len(names) != len(arrs): raise ValueError, "number of arrays do not match number of names" else: # we have only 1 name and 1 array names = [names] arrs = [arrs] arrs = map(np.asarray, arrs) if dtypes is None: dtypes = [a.dtype for a in arrs] elif not cbook.iterable(dtypes): dtypes = [dtypes] if len(arrs) != len(dtypes): if len(dtypes) == 1: dtypes = dtypes * len(arrs) else: raise ValueError, "dtypes must be None, a single dtype or a list" newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes)) newrec = np.empty(rec.shape, dtype=newdtype) for field in rec.dtype.fields: newrec[field] = rec[field] for name, arr in zip(names, arrs): newrec[name] = arr return rec_view(newrec) def rec_drop_fields(rec, names): """ Return a new numpy record array with fields in *names* dropped. """ names = set(names) Nr = len(rec) newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names if name not in names]) newrec = np.empty(Nr, dtype=newdtype) for field in newdtype.names: newrec[field] = rec[field] return rec_view(newrec) def rec_groupby(r, groupby, stats): """ *r* is a numpy record array *groupby* is a sequence of record array attribute names that together form the grouping key. eg ('date', 'productcode') *stats* is a sequence of (*attr*, *func*, *outname*) tuples which will call ``x = func(attr)`` and assign *x* to the record array output with attribute *outname*. For example:: stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') ) Return record array has *dtype* names for each attribute name in the the *groupby* argument, with the associated group values, and for each outname name in the *stats* argument, with the associated stat summary output. """ # build a dictionary from groupby keys-> list of indices into r with # those keys rowd = dict() for i, row in enumerate(r): key = tuple([row[attr] for attr in groupby]) rowd.setdefault(key, []).append(i) # sort the output by groupby keys keys = rowd.keys() keys.sort() rows = [] for key in keys: row = list(key) # get the indices for this groupby key ind = rowd[key] thisr = r[ind] # call each stat function for this groupby slice row.extend([func(thisr[attr]) for attr, func, outname in stats]) rows.append(row) # build the output record array with groupby and outname attributes attrs, funcs, outnames = zip(*stats) names = list(groupby) names.extend(outnames) return np.rec.fromrecords(rows, names=names) def rec_summarize(r, summaryfuncs): """ *r* is a numpy record array *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples which will apply *func* to the the array *r*[attr] and assign the output to a new attribute name *outname*. The returned record array is identical to *r*, with extra arrays for each element in *summaryfuncs*. """ names = list(r.dtype.names) arrays = [r[name] for name in names] for attr, func, outname in summaryfuncs: names.append(outname) arrays.append(np.asarray(func(r[attr]))) return np.rec.fromarrays(arrays, names=names) def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'): """ Join record arrays *r1* and *r2* on *key*; *key* is a tuple of field names -- if *key* is a string it is assumed to be a single attribute name. If *r1* and *r2* have equal values on all the keys in the *key* tuple, then their fields will be merged into a new record array containing the intersection of the fields of *r1* and *r2*. 
*r1* (also *r2*) must not have any duplicate keys. The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To do a rightouter join just reverse *r1* and *r2*. The *defaults* keyword is a dictionary filled with ``{column_name:default_value}`` pairs. The keywords *r1postfix* and *r2postfix* are postfixed to column names (other than keys) that are both in *r1* and *r2*. """ if cbook.is_string_like(key): key = (key, ) for name in key: if name not in r1.dtype.names: raise ValueError('r1 does not have key field %s'%name) if name not in r2.dtype.names: raise ValueError('r2 does not have key field %s'%name) def makekey(row): return tuple([row[name] for name in key]) r1d = dict([(makekey(row),i) for i,row in enumerate(r1)]) r2d = dict([(makekey(row),i) for i,row in enumerate(r2)]) r1keys = set(r1d.keys()) r2keys = set(r2d.keys()) common_keys = r1keys & r2keys r1ind = np.array([r1d[k] for k in common_keys]) r2ind = np.array([r2d[k] for k in common_keys]) common_len = len(common_keys) left_len = right_len = 0 if jointype == "outer" or jointype == "leftouter": left_keys = r1keys.difference(r2keys) left_ind = np.array([r1d[k] for k in left_keys]) left_len = len(left_ind) if jointype == "outer": right_keys = r2keys.difference(r1keys) right_ind = np.array([r2d[k] for k in right_keys]) right_len = len(right_ind) def key_desc(name): 'if name is a string key, use the larger size of r1 or r2 before merging' dt1 = r1.dtype[name] if dt1.type != np.string_: return (name, dt1.descr[0][1]) dt2 = r1.dtype[name] assert dt2==dt1 if dt1.num>dt2.num: return (name, dt1.descr[0][1]) else: return (name, dt2.descr[0][1]) keydesc = [key_desc(name) for name in key] def mapped_r1field(name): """ The column name in *newrec* that corresponds to the column in *r1*. """ if name in key or name not in r2.dtype.names: return name else: return name + r1postfix def mapped_r2field(name): """ The column name in *newrec* that corresponds to the column in *r2*. """ if name in key or name not in r1.dtype.names: return name else: return name + r2postfix r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key] r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key] newdtype = np.dtype(keydesc + r1desc + r2desc) newrec = np.empty(common_len + left_len + right_len, dtype=newdtype) if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse newrec_fields = newrec.dtype.fields.keys() for k, v in defaults.items(): if k in newrec_fields: newrec[k] = v for field in r1.dtype.names: newfield = mapped_r1field(field) if common_len: newrec[newfield][:common_len] = r1[field][r1ind] if (jointype == "outer" or jointype == "leftouter") and left_len: newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind] for field in r2.dtype.names: newfield = mapped_r2field(field) if field not in key and common_len: newrec[newfield][:common_len] = r2[field][r2ind] if jointype == "outer" and right_len: newrec[newfield][-right_len:] = r2[field][right_ind] newrec.sort(order=key) return rec_view(newrec) def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',', converterd=None, names=None, missing='', missingd=None, use_mrecords=True): """ Load data from comma/space/tab delimited file in *fname* into a numpy record array and return the record array. If *names* is *None*, a header row is required to automatically assign the recarray names. 
The headers will be lower cased, spaces will be converted to underscores, and illegal attribute name characters removed. If *names* is not *None*, it is a sequence of names to use for the column names. In this case, it is assumed there is no header row. - *fname*: can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in '.gz' - *comments*: the character used to indicate the start of a comment in the file - *skiprows*: is the number of rows from the top to skip - *checkrows*: is the number of rows to check to validate the column data type. When set to zero all rows are validated. - *converted*: if not *None*, is a dictionary mapping column number or munged column name to a converter function. - *names*: if not None, is a list of header names. In this case, no header will be read from the file - *missingd* is a dictionary mapping munged column names to field values which signify that the field does not contain actual data and should be masked, e.g. '0000-00-00' or 'unused' - *missing*: a string whose value signals a missing field regardless of the column it appears in - *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing If no rows are found, *None* is returned -- see :file:`examples/loadrec.py` """ if converterd is None: converterd = dict() if missingd is None: missingd = {} import dateutil.parser import datetime parsedate = dateutil.parser.parse fh = cbook.to_filehandle(fname) class FH: """ For space-delimited files, we want different behavior than comma or tab. Generally, we want multiple spaces to be treated as a single separator, whereas with comma and tab we want multiple commas to return multiple (empty) fields. The join/strip trick below effects this. """ def __init__(self, fh): self.fh = fh def close(self): self.fh.close() def seek(self, arg): self.fh.seek(arg) def fix(self, s): return ' '.join(s.split()) def next(self): return self.fix(self.fh.next()) def __iter__(self): for line in self.fh: yield self.fix(line) if delimiter==' ': fh = FH(fh) reader = csv.reader(fh, delimiter=delimiter) def process_skiprows(reader): if skiprows: for i, row in enumerate(reader): if i>=(skiprows-1): break return fh, reader process_skiprows(reader) def ismissing(name, val): "Should the value val in column name be masked?" 
if val == missing or val == missingd.get(name) or val == '': return True else: return False def with_default_value(func, default): def newfunc(name, val): if ismissing(name, val): return default else: return func(val) return newfunc def mybool(x): if x=='True': return True elif x=='False': return False else: raise ValueError('invalid bool') dateparser = dateutil.parser.parse mydateparser = with_default_value(dateparser, datetime.date(1,1,1)) myfloat = with_default_value(float, np.nan) myint = with_default_value(int, -1) mystr = with_default_value(str, '') mybool = with_default_value(mybool, None) def mydate(x): # try and return a date object d = dateparser(x) if d.hour>0 or d.minute>0 or d.second>0: raise ValueError('not a date') return d.date() mydate = with_default_value(mydate, datetime.date(1,1,1)) def get_func(name, item, func): # promote functions in this order funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr} try: func(name, item) except: if func==mystr: raise ValueError('Could not find a working conversion function') else: return get_func(name, item, funcmap[func]) # recurse else: return func # map column names that clash with builtins -- TODO - extend this list itemd = { 'return' : 'return_', 'file' : 'file_', 'print' : 'print_', } def get_converters(reader): converters = None for i, row in enumerate(reader): if i==0: converters = [mybool]*len(row) if checkrows and i>checkrows: break #print i, len(names), len(row) #print 'converters', zip(converters, row) for j, (name, item) in enumerate(zip(names, row)): func = converterd.get(j) if func is None: func = converterd.get(name) if func is None: #if not item.strip(): continue func = converters[j] if len(item.strip()): func = get_func(name, item, func) else: # how should we handle custom converters and defaults? 
func = with_default_value(func, None) converters[j] = func return converters # Get header and remove invalid characters needheader = names is None if needheader: for row in reader: #print 'csv2rec', row if len(row) and row[0].startswith(comments): continue headers = row break # remove these chars delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") delete.add('"') names = [] seen = dict() for i, item in enumerate(headers): item = item.strip().lower().replace(' ', '_') item = ''.join([c for c in item if c not in delete]) if not len(item): item = 'column%d'%i item = itemd.get(item, item) cnt = seen.get(item, 0) if cnt>0: names.append(item + '_%d'%cnt) else: names.append(item) seen[item] = cnt+1 else: if cbook.is_string_like(names): names = [n.strip() for n in names.split(',')] # get the converter functions by inspecting checkrows converters = get_converters(reader) if converters is None: raise ValueError('Could not find any valid data in CSV file') # reset the reader and start over fh.seek(0) reader = csv.reader(fh, delimiter=delimiter) process_skiprows(reader) if needheader: skipheader = reader.next() # iterate over the remaining rows and convert the data to date # objects, ints, or floats as approriate rows = [] rowmasks = [] for i, row in enumerate(reader): if not len(row): continue if row[0].startswith(comments): continue rows.append([func(name, val) for func, name, val in zip(converters, names, row)]) rowmasks.append([ismissing(name, val) for name, val in zip(names, row)]) fh.close() if not len(rows): return None if use_mrecords and np.any(rowmasks): try: from numpy.ma import mrecords except ImportError: raise RuntimeError('numpy 1.05 or later is required for masked array support') else: r = mrecords.fromrecords(rows, names=names, mask=rowmasks) else: r = np.rec.fromrecords(rows, names=names) return r # a series of classes for describing the format intentions of various rec views class FormatObj: def tostr(self, x): return self.toval(x) def toval(self, x): return str(x) def fromstr(self, s): return s class FormatString(FormatObj): def tostr(self, x): val = repr(x) return val[1:-1] #class FormatString(FormatObj): # def tostr(self, x): # return '"%r"'%self.toval(x) class FormatFormatStr(FormatObj): def __init__(self, fmt): self.fmt = fmt def tostr(self, x): if x is None: return 'None' return self.fmt%self.toval(x) class FormatFloat(FormatFormatStr): def __init__(self, precision=4, scale=1.): FormatFormatStr.__init__(self, '%%1.%df'%precision) self.precision = precision self.scale = scale def toval(self, x): if x is not None: x = x * self.scale return x def fromstr(self, s): return float(s)/self.scale class FormatInt(FormatObj): def tostr(self, x): return '%d'%int(x) def toval(self, x): return int(x) def fromstr(self, s): return int(s) class FormatBool(FormatObj): def toval(self, x): return str(x) def fromstr(self, s): return bool(s) class FormatPercent(FormatFloat): def __init__(self, precision=4): FormatFloat.__init__(self, precision, scale=100.) 
class FormatThousands(FormatFloat): def __init__(self, precision=4): FormatFloat.__init__(self, precision, scale=1e-3) class FormatMillions(FormatFloat): def __init__(self, precision=4): FormatFloat.__init__(self, precision, scale=1e-6) class FormatDate(FormatObj): def __init__(self, fmt): self.fmt = fmt def toval(self, x): if x is None: return 'None' return x.strftime(self.fmt) def fromstr(self, x): import dateutil.parser return dateutil.parser.parse(x).date() class FormatDatetime(FormatDate): def __init__(self, fmt='%Y-%m-%d %H:%M:%S'): FormatDate.__init__(self, fmt) def fromstr(self, x): import dateutil.parser return dateutil.parser.parse(x) defaultformatd = { np.bool_ : FormatBool(), np.int16 : FormatInt(), np.int32 : FormatInt(), np.int64 : FormatInt(), np.float32 : FormatFloat(), np.float64 : FormatFloat(), np.object_ : FormatObj(), np.string_ : FormatString(), } def get_formatd(r, formatd=None): 'build a formatd guaranteed to have a key for every dtype name' if formatd is None: formatd = dict() for i, name in enumerate(r.dtype.names): dt = r.dtype[name] format = formatd.get(name) if format is None: format = defaultformatd.get(dt.type, FormatObj()) formatd[name] = format return formatd def csvformat_factory(format): format = copy.deepcopy(format) if isinstance(format, FormatFloat): format.scale = 1. # override scaling for storage format.fmt = '%r' return format def rec2txt(r, header=None, padding=3, precision=3): """ Returns a textual representation of a record array. *r*: numpy recarray *header*: list of column headers *padding*: space between each column *precision*: number of decimal places to use for floats. Set to an integer to apply to all floats. Set to a list of integers to apply precision individually. Precision for non-floats is simply ignored. Example:: precision=[0,2,3] Output:: ID Price Return ABC 12.54 0.234 XYZ 6.32 -0.076 """ if cbook.is_numlike(precision): precision = [precision]*len(r.dtype) def get_type(item,atype=int): tdict = {None:int, int:float, float:str} try: atype(str(item)) except: return get_type(item,tdict[atype]) return atype def get_justify(colname, column, precision): ntype = type(column[0]) if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_: length = max(len(colname),column.itemsize) return 0, length+padding, "%s" # left justify if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_: length = max(len(colname),np.max(map(len,map(str,column)))) return 1, length+padding, "%d" # right justify # JDH: my powerbook does not have np.float96 using np 1.3.0 """ In [2]: np.__version__ Out[2]: '1.3.0.dev5948' In [3]: !uname -a Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386 In [4]: np.float96 --------------------------------------------------------------------------- AttributeError Traceback (most recent call la """ if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_: fmt = "%." 
+ str(precision) + "f" length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column)))) return 1, length+padding, fmt # right justify return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s" if header is None: header = r.dtype.names justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)] justify_pad_prec_spacer = [] for i in range(len(justify_pad_prec)): just,pad,prec = justify_pad_prec[i] if i == 0: justify_pad_prec_spacer.append((just,pad,prec,0)) else: pjust,ppad,pprec = justify_pad_prec[i-1] if pjust == 0 and just == 1: justify_pad_prec_spacer.append((just,pad-padding,prec,0)) elif pjust == 1 and just == 0: justify_pad_prec_spacer.append((just,pad,prec,padding)) else: justify_pad_prec_spacer.append((just,pad,prec,0)) def format(item, just_pad_prec_spacer): just, pad, prec, spacer = just_pad_prec_spacer if just == 0: return spacer*' ' + str(item).ljust(pad) else: if get_type(item) == float: item = (prec%float(item)) elif get_type(item) == int: item = (prec%int(item)) return item.rjust(pad) textl = [] textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)])) for i, row in enumerate(r): textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)])) if i==0: textl[0] = textl[0].rstrip() text = os.linesep.join(textl) return text def rec2csv(r, fname, delimiter=',', formatd=None, missing='', missingd=None): """ Save the data from numpy recarray *r* into a comma-/space-/tab-delimited file. The record array dtype names will be used for column headers. *fname*: can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in '.gz' .. seealso:: :func:`csv2rec`: For information about *missing* and *missingd*, which can be used to fill in masked values into your CSV file. """ if missingd is None: missingd = dict() def with_mask(func): def newfunc(val, mask, mval): if mask: return mval else: return func(val) return newfunc formatd = get_formatd(r, formatd) funcs = [] for i, name in enumerate(r.dtype.names): funcs.append(with_mask(csvformat_factory(formatd[name]).tostr)) fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True) writer = csv.writer(fh, delimiter=delimiter) header = r.dtype.names writer.writerow(header) # Our list of specials for missing values mvals = [] for name in header: mvals.append(missingd.get(name, missing)) ismasked = False if len(r): row = r[0] ismasked = hasattr(row, '_fieldmask') for row in r: if ismasked: row, rowmask = row.item(), row._fieldmask.item() else: rowmask = [False] * len(row) writer.writerow([func(val, mask, mval) for func, val, mask, mval in zip(funcs, row, rowmask, mvals)]) if opened: fh.close() def griddata(x,y,z,xi,yi): """ ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* = *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced vectors (*x*, *y*, *z*). :func:`griddata` interpolates this surface at the points specified by (*xi*, *yi*) to produce *zi*. *xi* and *yi* must describe a regular grid, can be either 1D or 2D, but must be monotonically increasing. A masked array is returned if any grid points are outside convex hull defined by input data (no extrapolation is done). Uses natural neighbor interpolation based on Delaunay triangulation. By default, this algorithm is provided by the :mod:`matplotlib.delaunay` package, written by Robert Kern. 
The triangulation algorithm in this package is known to fail on some nearly pathological cases. For this reason, a separate toolkit (:mod:`mpl_tookits.natgrid`) has been created that provides a more robust algorithm fof triangulation and interpolation. This toolkit is based on the NCAR natgrid library, which contains code that is not redistributable under a BSD-compatible license. When installed, this function will use the :mod:`mpl_toolkits.natgrid` algorithm, otherwise it will use the built-in :mod:`matplotlib.delaunay` package. The natgrid matplotlib toolkit can be downloaded from http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792 """ try: from mpl_toolkits.natgrid import _natgrid, __version__ _use_natgrid = True except ImportError: import matplotlib.delaunay as delaunay from matplotlib.delaunay import __version__ _use_natgrid = False if not griddata._reported: if _use_natgrid: verbose.report('using natgrid version %s' % __version__) else: verbose.report('using delaunay version %s' % __version__) griddata._reported = True if xi.ndim != yi.ndim: raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)") if xi.ndim != 1 and xi.ndim != 2: raise TypeError("inputs xi and yi must be 1D or 2D.") if not len(x)==len(y)==len(z): raise TypeError("inputs x,y,z must all be 1D arrays of the same length") # remove masked points. if hasattr(z,'mask'): x = x.compress(z.mask == False) y = y.compress(z.mask == False) z = z.compressed() if _use_natgrid: # use natgrid toolkit if available. if xi.ndim == 2: xi = xi[0,:] yi = yi[:,0] # override default natgrid internal parameters. _natgrid.seti('ext',0) _natgrid.setr('nul',np.nan) # cast input arrays to doubles (this makes a copy) x = x.astype(np.float) y = y.astype(np.float) z = z.astype(np.float) xo = xi.astype(np.float) yo = yi.astype(np.float) if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0: raise ValueError, 'output grid defined by xi,yi must be monotone increasing' # allocate array for output (buffer will be overwritten by nagridd) zo = np.empty((yo.shape[0],xo.shape[0]), np.float) _natgrid.natgridd(x,y,z,xo,yo,zo) else: # use Robert Kern's delaunay package from scikits (default) if xi.ndim != yi.ndim: raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)") if xi.ndim != 1 and xi.ndim != 2: raise TypeError("inputs xi and yi must be 1D or 2D.") if xi.ndim == 1: xi,yi = np.meshgrid(xi,yi) # triangulate data tri = delaunay.Triangulation(x,y) # interpolate data interp = tri.nn_interpolator(z) zo = interp(xi,yi) # mask points on grid outside convex hull of input data. if np.any(np.isnan(zo)): zo = np.ma.masked_where(np.isnan(zo),zo) return zo griddata._reported = False ################################################## # Linear interpolation algorithms ################################################## def less_simple_linear_interpolation( x, y, xi, extrap=False ): """ This function provides simple (but somewhat less so than :func:`cbook.simple_linear_interpolation`) linear interpolation. :func:`simple_linear_interpolation` will give a list of point between a start and an end, while this does true linear interpolation at an arbitrary set of points. This is very inefficient linear interpolation meant to be used only for a small number of points in relatively non-intensive use cases. For real linear interpolation, use scipy. 
""" if cbook.is_scalar(xi): xi = [xi] x = np.asarray(x) y = np.asarray(y) xi = np.asarray(xi) s = list(y.shape) s[0] = len(xi) yi = np.tile( np.nan, s ) for ii,xx in enumerate(xi): bb = x == xx if np.any(bb): jj, = np.nonzero(bb) yi[ii] = y[jj[0]] elif xx<x[0]: if extrap: yi[ii] = y[0] elif xx>x[-1]: if extrap: yi[ii] = y[-1] else: jj, = np.nonzero(x<xx) jj = max(jj) yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj]) return yi def slopes(x,y): """ :func:`slopes` calculates the slope *y*'(*x*) The slope is estimated using the slope obtained from that of a parabola through any three consecutive points. This method should be superior to that described in the appendix of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel W. Stineman (Creative Computing July 1980) in at least one aspect: Circles for interpolation demand a known aspect ratio between *x*- and *y*-values. For many functions, however, the abscissa are given in different dimensions, so an aspect ratio is completely arbitrary. The parabola method gives very similar results to the circle method for most regular cases but behaves much better in special cases. Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de (inspired by a original implementation by Halldor Bjornsson, Icelandic Meteorological Office, March 2006 halldor at vedur.is) """ # Cast key variables as float. x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) yp=np.zeros(y.shape, np.float_) dx=x[1:] - x[:-1] dy=y[1:] - y[:-1] dydx = dy/dx yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1]) yp[0] = 2.0 * dy[0]/dx[0] - yp[1] yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2] return yp def stineman_interp(xi,x,y,yp=None): """ Given data vectors *x* and *y*, the slope vector *yp* and a new abscissa vector *xi*, the function :func:`stineman_interp` uses Stineman interpolation to calculate a vector *yi* corresponding to *xi*. Here's an example that generates a coarse sine curve, then interpolates over a finer abscissa:: x = linspace(0,2*pi,20); y = sin(x); yp = cos(x) xi = linspace(0,2*pi,40); yi = stineman_interp(xi,x,y,yp); plot(x,y,'o',xi,yi) The interpolation method is described in the article A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell W. Stineman. The article appeared in the July 1980 issue of Creative Computing with a note from the editor stating that while they were: not an academic journal but once in a while something serious and original comes in adding that this was "apparently a real solution" to a well known problem. For *yp* = *None*, the routine automatically determines the slopes using the :func:`slopes` routine. *x* is assumed to be sorted in increasing order. For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine tries an extrapolation. The relevance of the data obtained from this, of course, is questionable... Original implementation by Halldor Bjornsson, Icelandic Meteorolocial Office, March 2006 halldor at vedur.is Completely reworked and optimized for Python by Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de """ # Cast key variables as float. 
x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) assert x.shape == y.shape N=len(y) if yp is None: yp = slopes(x,y) else: yp=np.asarray(yp, np.float_) xi=np.asarray(xi, np.float_) yi=np.zeros(xi.shape, np.float_) # calculate linear slopes dx = x[1:] - x[:-1] dy = y[1:] - y[:-1] s = dy/dx #note length of s is N-1 so last element is #N-2 # find the segment each xi is in # this line actually is the key to the efficiency of this implementation idx = np.searchsorted(x[1:-1], xi) # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1] # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1] # the y-values that would come out from a linear interpolation: sidx = s.take(idx) xidx = x.take(idx) yidx = y.take(idx) xidxp1 = x.take(idx+1) yo = yidx + sidx * (xi - xidx) # the difference that comes when using the slopes given in yp dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point dy1dy2 = dy1*dy2 # The following is optimized for Python. The solution actually # does more calculations than necessary but exploiting the power # of numpy, this is far more efficient than coding a loop by hand # in Python yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1, ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)), 0.0, 1/(dy1+dy2),)) return yi ################################################## # Code related to things in and around polygons ################################################## def inside_poly(points, verts): """ *points* is a sequence of *x*, *y* points. *verts* is a sequence of *x*, *y* vertices of a polygon. Return value is a sequence of indices into points for the points that are inside the polygon. """ res, = np.nonzero(nxutils.points_inside_poly(points, verts)) return res def poly_below(xmin, xs, ys): """ Given a sequence of *xs* and *ys*, return the vertices of a polygon that has a horizontal base at *xmin* and an upper bound at the *ys*. *xmin* is a scalar. Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg:: xv, yv = poly_below(0, x, y) ax.fill(xv, yv) """ if ma.isMaskedArray(xs) or ma.isMaskedArray(ys): nx = ma else: nx = np xs = nx.asarray(xs) ys = nx.asarray(ys) Nx = len(xs) Ny = len(ys) assert(Nx==Ny) x = xmin*nx.ones(2*Nx) y = nx.ones(2*Nx) x[:Nx] = xs y[:Nx] = ys y[Nx:] = ys[::-1] return x, y def poly_between(x, ylower, yupper): """ Given a sequence of *x*, *ylower* and *yupper*, return the polygon that fills the regions between them. *ylower* or *yupper* can be scalar or iterable. If they are iterable, they must be equal in length to *x*. Return value is *x*, *y* arrays for use with :meth:`matplotlib.axes.Axes.fill`. """ if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x): nx = ma else: nx = np Nx = len(x) if not cbook.iterable(ylower): ylower = ylower*nx.ones(Nx) if not cbook.iterable(yupper): yupper = yupper*nx.ones(Nx) x = nx.concatenate( (x, x[::-1]) ) y = nx.concatenate( (yupper, ylower[::-1]) ) return x,y def is_closed_polygon(X): """ Tests whether first and last object in a sequence are the same. These are presumably coordinates on a polygonal curve, in which case this function tests if that curve is closed. 
""" return np.all(X[0] == X[-1]) def contiguous_regions(mask): """ return a list of (ind0, ind1) such that mask[ind0:ind1].all() is True and we cover all such regions TODO: this is a pure python implementation which probably has a much faster numpy impl """ in_region = None boundaries = [] for i, val in enumerate(mask): if in_region is None and val: in_region = i elif in_region is not None and not val: boundaries.append((in_region, i)) in_region = None if in_region is not None: boundaries.append((in_region, i+1)) return boundaries ################################################## # Vector and path length geometry calculations ################################################## def vector_lengths( X, P=2., axis=None ): """ Finds the length of a set of vectors in *n* dimensions. This is like the :func:`numpy.norm` function for vectors, but has the ability to work over a particular axis of the supplied array or matrix. Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the elements of *X* along the given axis. If *axis* is *None*, compute over all elements of *X*. """ X = np.asarray(X) return (np.sum(X**(P),axis=axis))**(1./P) def distances_along_curve( X ): """ Computes the distance between a set of successive points in *N* dimensions. Where *X* is an *M* x *N* array or matrix. The distances between successive rows is computed. Distance is the standard Euclidean distance. """ X = np.diff( X, axis=0 ) return vector_lengths(X,axis=1) def path_length(X): """ Computes the distance travelled along a polygonal curve in *N* dimensions. Where *X* is an *M* x *N* array or matrix. Returns an array of length *M* consisting of the distance along the curve at each point (i.e., the rows of *X*). """ X = distances_along_curve(X) return np.concatenate( (np.zeros(1), np.cumsum(X)) ) def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y): """ Converts a quadratic Bezier curve to a cubic approximation. The inputs are the *x* and *y* coordinates of the three control points of a quadratic curve, and the output is a tuple of *x* and *y* coordinates of the four control points of the cubic curve. """ # c0x, c0y = q0x, q0y c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y) c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y) # c3x, c3y = q2x, q2y return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
agpl-3.0
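The matplotlib.mlab excerpt above ends with quad2cubic(), which degree-elevates a quadratic Bezier segment to a cubic one. As a quick, hedged illustration (pure numpy; every name below is mine and not part of the file), the control-point formula it uses can be checked by sampling both curves and comparing them pointwise:

import numpy as np

def quad_bezier(t, p0, p1, p2):
    # B2(t) = (1-t)^2 p0 + 2 (1-t) t p1 + t^2 p2
    t = t[:, None]
    return (1 - t) ** 2 * p0 + 2 * (1 - t) * t * p1 + t ** 2 * p2

def cubic_bezier(t, c0, c1, c2, c3):
    # B3(t) = (1-t)^3 c0 + 3 (1-t)^2 t c1 + 3 (1-t) t^2 c2 + t^3 c3
    t = t[:, None]
    return ((1 - t) ** 3 * c0 + 3 * (1 - t) ** 2 * t * c1
            + 3 * (1 - t) * t ** 2 * c2 + t ** 3 * c3)

p0, p1, p2 = np.array([0., 0.]), np.array([1., 2.]), np.array([3., 0.])
# the same arithmetic quad2cubic() performs on its x/y inputs
c1 = p0 + 2. / 3. * (p1 - p0)
c2 = c1 + 1. / 3. * (p2 - p0)
t = np.linspace(0., 1., 11)
assert np.allclose(quad_bezier(t, p0, p1, p2), cubic_bezier(t, p0, c1, c2, p2))

The identity c1 = q0 + 2/3 (q1 - q0), c2 = c1 + 1/3 (q2 - q0) is the standard quadratic-to-cubic degree-elevation rule, so the two sampled curves coincide for every t.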
shusenl/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
226
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
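The RFECV example above targets the pre-0.18 scikit-learn API, in which StratifiedKFold lives in sklearn.cross_validation and is built from (y, n_folds). A hedged sketch of the same fit on a current release (assuming sklearn.model_selection is available; the variable names simply mirror the example) looks like this:

from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification

# same synthetic task as in the example above
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, random_state=0)

# the splitter is now constructed without y and passed to RFECV as-is
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1,
              cv=StratifiedKFold(n_splits=2), scoring='accuracy')
rfecv.fit(X, y)

print("Optimal number of features : %d" % rfecv.n_features_)
print("Selected feature mask      :", rfecv.support_)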
lmregus/Portfolio
python/design_patterns/env/lib/python3.7/site-packages/IPython/core/tests/test_display.py
1
14014
# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. import json import os import warnings from unittest import mock import nose.tools as nt from IPython.core import display from IPython.core.getipython import get_ipython from IPython.utils.io import capture_output from IPython.utils.tempdir import NamedFileInTemporaryDirectory from IPython import paths as ipath from IPython.testing.tools import AssertPrints, AssertNotPrints import IPython.testing.decorators as dec def test_image_size(): """Simple test for display.Image(args, width=x,height=y)""" thisurl = 'http://www.google.fr/images/srpr/logo3w.png' img = display.Image(url=thisurl, width=200, height=200) nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_()) img = display.Image(url=thisurl, metadata={'width':200, 'height':200}) nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_()) img = display.Image(url=thisurl, width=200) nt.assert_equal(u'<img src="%s" width="200"/>' % (thisurl), img._repr_html_()) img = display.Image(url=thisurl) nt.assert_equal(u'<img src="%s"/>' % (thisurl), img._repr_html_()) img = display.Image(url=thisurl, unconfined=True) nt.assert_equal(u'<img src="%s" class="unconfined"/>' % (thisurl), img._repr_html_()) def test_image_mimes(): fmt = get_ipython().display_formatter.format for format in display.Image._ACCEPTABLE_EMBEDDINGS: mime = display.Image._MIMETYPES[format] img = display.Image(b'garbage', format=format) data, metadata = fmt(img) nt.assert_equal(sorted(data), sorted([mime, 'text/plain'])) def test_geojson(): gj = display.GeoJSON(data={ "type": "Feature", "geometry": { "type": "Point", "coordinates": [-81.327, 296.038] }, "properties": { "name": "Inca City" } }, url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png", layer_options={ "basemap_id": "celestia_mars-shaded-16k_global", "attribution": "Celestia/praesepe", "minZoom": 0, "maxZoom": 18, }) nt.assert_equal(u'<IPython.core.display.GeoJSON object>', str(gj)) def test_retina_png(): here = os.path.dirname(__file__) img = display.Image(os.path.join(here, "2x2.png"), retina=True) nt.assert_equal(img.height, 1) nt.assert_equal(img.width, 1) data, md = img._repr_png_() nt.assert_equal(md['width'], 1) nt.assert_equal(md['height'], 1) def test_retina_jpeg(): here = os.path.dirname(__file__) img = display.Image(os.path.join(here, "2x2.jpg"), retina=True) nt.assert_equal(img.height, 1) nt.assert_equal(img.width, 1) data, md = img._repr_jpeg_() nt.assert_equal(md['width'], 1) nt.assert_equal(md['height'], 1) def test_base64image(): display.Image("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB94BCRQnOqNu0b4AAAAKSURBVAjXY2AAAAACAAHiIbwzAAAAAElFTkSuQmCC") def test_image_filename_defaults(): '''test format constraint, and validity of jpeg and png''' tpath = ipath.get_ipython_package_dir() nt.assert_raises(ValueError, display.Image, filename=os.path.join(tpath, 'testing/tests/badformat.zip'), embed=True) nt.assert_raises(ValueError, display.Image) nt.assert_raises(ValueError, display.Image, data='this is not an image', format='badformat', embed=True) # check boths paths to allow packages to test at build and install time imgfile = os.path.join(tpath, 'core/tests/2x2.png') img = display.Image(filename=imgfile) nt.assert_equal('png', img.format) nt.assert_is_not_none(img._repr_png_()) img = 
display.Image(filename=os.path.join(tpath, 'testing/tests/logo.jpg'), embed=False) nt.assert_equal('jpeg', img.format) nt.assert_is_none(img._repr_jpeg_()) def _get_inline_config(): from ipykernel.pylab.config import InlineBackend return InlineBackend.instance() @dec.skip_without('matplotlib') def test_set_matplotlib_close(): cfg = _get_inline_config() cfg.close_figures = False display.set_matplotlib_close() assert cfg.close_figures display.set_matplotlib_close(False) assert not cfg.close_figures _fmt_mime_map = { 'png': 'image/png', 'jpeg': 'image/jpeg', 'pdf': 'application/pdf', 'retina': 'image/png', 'svg': 'image/svg+xml', } @dec.skip_without('matplotlib') def test_set_matplotlib_formats(): from matplotlib.figure import Figure formatters = get_ipython().display_formatter.formatters for formats in [ ('png',), ('pdf', 'svg'), ('jpeg', 'retina', 'png'), (), ]: active_mimes = {_fmt_mime_map[fmt] for fmt in formats} display.set_matplotlib_formats(*formats) for mime, f in formatters.items(): if mime in active_mimes: nt.assert_in(Figure, f) else: nt.assert_not_in(Figure, f) @dec.skip_without('matplotlib') def test_set_matplotlib_formats_kwargs(): from matplotlib.figure import Figure ip = get_ipython() cfg = _get_inline_config() cfg.print_figure_kwargs.update(dict(foo='bar')) kwargs = dict(quality=10) display.set_matplotlib_formats('png', **kwargs) formatter = ip.display_formatter.formatters['image/png'] f = formatter.lookup_by_type(Figure) cell = f.__closure__[0].cell_contents expected = kwargs expected.update(cfg.print_figure_kwargs) nt.assert_equal(cell, expected) def test_display_available(): """ Test that display is available without import We don't really care if it's in builtin or anything else, but it should always be available. """ ip = get_ipython() with AssertNotPrints('NameError'): ip.run_cell('display') try: ip.run_cell('del display') except NameError: pass # it's ok, it might be in builtins # even if deleted it should be back with AssertNotPrints('NameError'): ip.run_cell('display') def test_textdisplayobj_pretty_repr(): p = display.Pretty("This is a simple test") nt.assert_equal(repr(p), '<IPython.core.display.Pretty object>') nt.assert_equal(p.data, 'This is a simple test') p._show_mem_addr = True nt.assert_equal(repr(p), object.__repr__(p)) def test_displayobject_repr(): h = display.HTML('<br />') nt.assert_equal(repr(h), '<IPython.core.display.HTML object>') h._show_mem_addr = True nt.assert_equal(repr(h), object.__repr__(h)) h._show_mem_addr = False nt.assert_equal(repr(h), '<IPython.core.display.HTML object>') j = display.Javascript('') nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>') j._show_mem_addr = True nt.assert_equal(repr(j), object.__repr__(j)) j._show_mem_addr = False nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>') @mock.patch('warnings.warn') def test_encourage_iframe_over_html(m_warn): display.HTML() m_warn.assert_not_called() display.HTML('<br />') m_warn.assert_not_called() display.HTML('<html><p>Lots of content here</p><iframe src="http://a.com"></iframe>') m_warn.assert_not_called() display.HTML('<iframe src="http://a.com"></iframe>') m_warn.assert_called_with('Consider using IPython.display.IFrame instead') m_warn.reset_mock() display.HTML('<IFRAME SRC="http://a.com"></IFRAME>') m_warn.assert_called_with('Consider using IPython.display.IFrame instead') def test_progress(): p = display.ProgressBar(10) nt.assert_in('0/10',repr(p)) p.html_width = '100%' p.progress = 5 nt.assert_equal(p._repr_html_(), "<progress 
style='width:100%' max='10' value='5'></progress>") def test_progress_iter(): with capture_output(display=False) as captured: for i in display.ProgressBar(5): out = captured.stdout nt.assert_in('{0}/5'.format(i), out) out = captured.stdout nt.assert_in('5/5', out) def test_json(): d = {'a': 5} lis = [d] metadata = [ {'expanded': False, 'root': 'root'}, {'expanded': True, 'root': 'root'}, {'expanded': False, 'root': 'custom'}, {'expanded': True, 'root': 'custom'}, ] json_objs = [ display.JSON(d), display.JSON(d, expanded=True), display.JSON(d, root='custom'), display.JSON(d, expanded=True, root='custom'), ] for j, md in zip(json_objs, metadata): nt.assert_equal(j._repr_json_(), (d, md)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") j = display.JSON(json.dumps(d)) nt.assert_equal(len(w), 1) nt.assert_equal(j._repr_json_(), (d, metadata[0])) json_objs = [ display.JSON(lis), display.JSON(lis, expanded=True), display.JSON(lis, root='custom'), display.JSON(lis, expanded=True, root='custom'), ] for j, md in zip(json_objs, metadata): nt.assert_equal(j._repr_json_(), (lis, md)) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") j = display.JSON(json.dumps(lis)) nt.assert_equal(len(w), 1) nt.assert_equal(j._repr_json_(), (lis, metadata[0])) def test_video_embedding(): """use a tempfile, with dummy-data, to ensure that video embedding doesn't crash""" v = display.Video("http://ignored") assert not v.embed html = v._repr_html_() nt.assert_not_in('src="data:', html) nt.assert_in('src="http://ignored"', html) with nt.assert_raises(ValueError): v = display.Video(b'abc') with NamedFileInTemporaryDirectory('test.mp4') as f: f.write(b'abc') f.close() v = display.Video(f.name) assert not v.embed html = v._repr_html_() nt.assert_not_in('src="data:', html) v = display.Video(f.name, embed=True) html = v._repr_html_() nt.assert_in('src="data:video/mp4;base64,YWJj"',html) v = display.Video(f.name, embed=True, mimetype='video/other') html = v._repr_html_() nt.assert_in('src="data:video/other;base64,YWJj"',html) v = display.Video(b'abc', embed=True, mimetype='video/mp4') html = v._repr_html_() nt.assert_in('src="data:video/mp4;base64,YWJj"',html) v = display.Video(u'YWJj', embed=True, mimetype='video/xyz') html = v._repr_html_() nt.assert_in('src="data:video/xyz;base64,YWJj"',html) def test_html_metadata(): s = "<h1>Test</h1>" h = display.HTML(s, metadata={"isolated": True}) nt.assert_equal(h._repr_html_(), (s, {"isolated": True})) def test_display_id(): ip = get_ipython() with mock.patch.object(ip.display_pub, 'publish') as pub: handle = display.display('x') nt.assert_is(handle, None) handle = display.display('y', display_id='secret') nt.assert_is_instance(handle, display.DisplayHandle) handle2 = display.display('z', display_id=True) nt.assert_is_instance(handle2, display.DisplayHandle) nt.assert_not_equal(handle.display_id, handle2.display_id) nt.assert_equal(pub.call_count, 3) args, kwargs = pub.call_args_list[0] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('x') }, 'metadata': {}, }) args, kwargs = pub.call_args_list[1] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('y') }, 'metadata': {}, 'transient': { 'display_id': handle.display_id, }, }) args, kwargs = pub.call_args_list[2] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('z') }, 'metadata': {}, 'transient': { 'display_id': handle2.display_id, }, }) def test_update_display(): ip = get_ipython() 
with mock.patch.object(ip.display_pub, 'publish') as pub: with nt.assert_raises(TypeError): display.update_display('x') display.update_display('x', display_id='1') display.update_display('y', display_id='2') args, kwargs = pub.call_args_list[0] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('x') }, 'metadata': {}, 'transient': { 'display_id': '1', }, 'update': True, }) args, kwargs = pub.call_args_list[1] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('y') }, 'metadata': {}, 'transient': { 'display_id': '2', }, 'update': True, }) def test_display_handle(): ip = get_ipython() handle = display.DisplayHandle() nt.assert_is_instance(handle.display_id, str) handle = display.DisplayHandle('my-id') nt.assert_equal(handle.display_id, 'my-id') with mock.patch.object(ip.display_pub, 'publish') as pub: handle.display('x') handle.update('y') args, kwargs = pub.call_args_list[0] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('x') }, 'metadata': {}, 'transient': { 'display_id': handle.display_id, } }) args, kwargs = pub.call_args_list[1] nt.assert_equal(args, ()) nt.assert_equal(kwargs, { 'data': { 'text/plain': repr('y') }, 'metadata': {}, 'transient': { 'display_id': handle.display_id, }, 'update': True, })
mit
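Several of the tests above (test_display_id, test_update_display, test_display_handle) exercise the display_id protocol. A minimal usage sketch, assuming the code runs under a Jupyter/IPython kernel and using only the calls those tests already make:

from IPython.core import display  # the same module the tests import

handle = display.display("working...", display_id=True)   # returns a DisplayHandle
# ... long-running work would happen here ...
handle.update("done")                                      # rewrites the same output area
# the module-level form checked in test_update_display() is equivalent:
display.update_display("done (again)", display_id=handle.display_id)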
tapomayukh/projects_in_python
rapid_categorization/misc/hmm_online_force_10_states_pr2.py
1
14050
# Hidden Markov Model Implementation import math import pylab as pyl import numpy as np import matplotlib.pyplot as pp import matplotlib.cm as cm #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle import ghmm from visualization_msgs.msg import Marker from visualization_msgs.msg import MarkerArray from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact from hrl_haptic_manipulation_in_clutter_msgs.msg import ObjectInfo from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray from m3skin_ros.msg import TaxelArray as TaxelArray_Meka from hrl_msgs.msg import FloatArrayBare from geometry_msgs.msg import Point from geometry_msgs.msg import Vector3 import std_msgs.msg import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384') from data_384 import Fmat_original # Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models def feature_to_mu_sigma(fvec): index = 0 m,n = np.shape(fvec) #print m,n mu = np.matrix(np.zeros((10,1))) sigma = np.matrix(np.zeros((10,1))) DIVS = m/10 while (index < 10): m_init = index*DIVS temp_fvec = fvec[(m_init):(m_init+DIVS),0:] #if index == 1: #print temp_fvec mu[index] = scp.mean(temp_fvec) sigma[index] = scp.std(temp_fvec) index = index+1 return mu,sigma def callback(data): rospy.loginfo('Getting data!') # Processing Force Data force_vectors = np.row_stack([data.values_x, data.values_y, data.values_z]) fmags_instant = ut.norm(force_vectors) #print fmags_instant threshold = 0.01 fmags_tuned = fmags_instant - threshold fmags_tuned[np.where(fmags_tuned<0)]=0 #print fmags_tuned fmags_instant_tuned = fmags_tuned global fmags fmags = np.row_stack([fmags,fmags_instant_tuned]) # Processing Contact Data contact_vectors = np.row_stack([data.centers_x, data.centers_y, data.centers_z]) #print np.shape(contact_vectors) contact_regions = fmags_instant > threshold lb,ls = ni.label(contact_regions) total_contact = ni.sum(lb) # After thresholding, assuming one connected component (experiment designed that way) #print total_contact # Calculating force data for contact with hand-tuned force threshold #total_forces = ni.sum(fmags_instant,lb) #mean_forces = ni.mean(fmags_instant,lb) temp = fmags_instant*lb global max_force max_force = np.max(temp) global time_varying_data time_varying_data.append(max_force) if total_contact > 0: global contact_point_world global_x = ni.mean(contact_vectors[0,:],lb) global_y = ni.mean(contact_vectors[1,:],lb) global_z = ni.mean(contact_vectors[2,:],lb) print global_x, global_y, global_z contact_point_world = [global_x,global_y,global_z] #print contact_point_world #print np.shape(contact_point_world) global time_instant_contact_data time_instant_contact_data = [contact_point_world[0],contact_point_world[1],contact_point_world[2]] #print time_instant_contact_data global time_varying_contact_data time_varying_contact_data = np.row_stack([time_varying_contact_data, time_instant_contact_data]) test_data() def tracking_point(): rospy.loginfo('Tracking Distance!') ta = time_varying_contact_data k = 0 for i in ta[:,0]: if i != ta[-1,0]: instant_dist = math.sqrt((ta[k+1,1]-ta[1,1])**2 + (ta[k+1,2]-ta[1,2])**2 + (ta[k+1,3]-ta[1,3])**2) time_instant_tracker = [ta[k+1,0], instant_dist] global time_varying_tracker 
time_varying_tracker = np.row_stack([time_varying_tracker, time_instant_tracker]) k=k+1 def test_data(): pub = rospy.Publisher("/hmm/object_data", ObjectInfo) obj_info = ObjectInfo() obj_info.obj_id = 0 obj_info.contact_x = 0 obj_info.contact_y = 0 obj_info.contact_z = 0 global index global FLAG_Trunk global FLAG_Trunk_List global FLAG_Leaf global FLAG_Leaf_List global time_instant_contact_data # For Testing global time_varying_data global max_force if (max_force > 0): ts_obj = time_varying_data final_ts_obj = ghmm.EmissionSequence(F,ts_obj) # Find Viterbi Path global model_rf global model_rm global model_sf global model_sm path_rf_obj = model_rf.viterbi(final_ts_obj) path_rm_obj = model_rm.viterbi(final_ts_obj) path_sf_obj = model_sf.viterbi(final_ts_obj) path_sm_obj = model_sm.viterbi(final_ts_obj) #print path_rf_obj[1], path_rm_obj[1], path_sf_obj[1], path_sm_obj[1] diff_rf = min(abs(path_rf_obj[1]-path_rm_obj[1]),abs(path_rf_obj[1]-path_sf_obj[1]),abs(path_rf_obj[1]-path_sm_obj[1])) diff_rm = min(abs(path_rm_obj[1]-path_rf_obj[1]),abs(path_rm_obj[1]-path_sf_obj[1]),abs(path_rm_obj[1]-path_sm_obj[1])) diff_sf = min(abs(path_sf_obj[1]-path_rf_obj[1]),abs(path_sf_obj[1]-path_rm_obj[1]),abs(path_sf_obj[1]-path_sm_obj[1])) diff_sm = min(abs(path_sm_obj[1]-path_rf_obj[1]),abs(path_sm_obj[1]-path_rm_obj[1]),abs(path_sm_obj[1]-path_sf_obj[1])) obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1]) obj_min = min(abs(path_rf_obj[1]),abs(path_rm_obj[1]),abs(path_sf_obj[1]),abs(path_sm_obj[1])) #print ' ' #print '################################################################################' if ((obj == path_rf_obj[1]) and (diff_rf > 500)): #if ((obj == path_rf_obj[1]) and (obj_min > 1800)): #print 'Rigid_Fixed :' FLAG_Trunk = True FLAG_Leaf = False obj_info.obj_id = 1 obj_info.contact_x = time_instant_contact_data[0] obj_info.contact_y = time_instant_contact_data[1] obj_info.contact_z = time_instant_contact_data[2] pub.publish(obj_info) elif ((obj == path_rm_obj[1]) and (diff_rm > 500)): #elif ((obj == path_rm_obj[1]) and (obj_min > 1800)): print 'Rigid_Fixed :' FLAG_Trunk = False FLAG_Leaf = False obj_info.obj_id = 2 obj_info.contact_x = time_instant_contact_data[0] obj_info.contact_y = time_instant_contact_data[1] obj_info.contact_z = time_instant_contact_data[2] pub.publish(obj_info) elif ((obj == path_sf_obj[1]) and (diff_sf > 500)): #elif ((obj == path_sf_obj[1]) and (obj_min > 1800)): print 'Soft_Fixed :' FLAG_Trunk = False FLAG_Leaf = True obj_info.obj_id = 3 obj_info.contact_x = time_instant_contact_data[0] obj_info.contact_y = time_instant_contact_data[1] obj_info.contact_z = time_instant_contact_data[2] pub.publish(obj_info) elif ((obj == path_sm_obj[1]) and (diff_sm > 500)): #elif ((obj == path_sm_obj[1]) and (obj_min > 1800)): print 'Soft_Movable :' FLAG_Trunk = False FLAG_Leaf = False obj_info.obj_id = 4 obj_info.contact_x = time_instant_contact_data[0] obj_info.contact_y = time_instant_contact_data[1] obj_info.contact_z = time_instant_contact_data[2] pub.publish(obj_info) else: print 'Unknown' FLAG_Trunk = False FLAG_Leaf = False object_ID = 0 pub.publish(obj_info) FLAG_Trunk_List.append(FLAG_Trunk) FLAG_Leaf_List.append(FLAG_Leaf) #print '################################################################################' #print ' ' else: print 'Unknown' FLAG_Trunk = False FLAG_Leaf = False object_ID = 0 pub.publish(obj_info) time_varying_data = [0] def getdata(): rospy.init_node('getdata', anonymous=True) rospy.Subscriber("pr2_fabric_forearm_sensor/taxels/forces", 
TaxelArray_Meka, callback) rospy.spin() def getpics(): global FLAG_Trunk_List global FLAG_Leaf_List j = 0 #index = 0 # For Fixed Pillow index = 16 #For Fixed Styrofoam, Fixed Foliage, Movable Styrofoam, and Movable Pillow for i in fmags: force_arr_raw = fmags[j,:] force_arr_raw_reshaped = force_arr_raw.reshape((16,24)) force_arr = np.column_stack((force_arr_raw_reshaped[:,index:],force_arr_raw_reshaped[:,:index])) global frame frame = frame + 1 if ((not FLAG_Trunk_List[j]) and (not FLAG_Leaf_List[j])): #Keep it Black pp.imshow(force_arr, interpolation='nearest', cmap=cm.binary, origin='upper', vmin=0, vmax=1) elif ((FLAG_Trunk_List[j]) and (not FLAG_Leaf_List[j])): #Make it Red pp.imshow(force_arr, interpolation='nearest', cmap=cm.Reds, origin='upper', vmin=0, vmax=1) elif ((not FLAG_Trunk_List[j]) and (FLAG_Leaf_List[j])): #Make it Green pp.imshow(force_arr, interpolation='nearest', cmap=cm.Greens, origin='upper', vmin=0, vmax=1) pp.title('Unrolled Taxel Array') pp.xlabel('Along the circumference') pp.ylabel('Along the forearm') pp.xlim(-0.5,23.5) pp.ylim(15.5, -0.5) pyl.savefig('%05d.%s'% (frame, format)) j = j+1 if __name__ == '__main__': Fmat = Fmat_original # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) #print " " #print 'Total_Matrix_Shape:',m_tot,n_tot mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:121,0:35]) mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:121,35:70]) mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:121,70:105]) mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:121,105:140]) #print [mu_rf, sigma_rf] # HMM - Implementation: # 10 Hidden States # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch) # For new objects, it is classified according to which model it represenst the closest.. 
F = ghmm.Float() # emission domain of this model # A - Transition Matrix A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma) B_rf = np.zeros((10,2)) B_rm = np.zeros((10,2)) B_sf = np.zeros((10,2)) B_sm = np.zeros((10,2)) for num_states in range(10): B_rf[num_states,0] = mu_rf[num_states] B_rf[num_states,1] = sigma_rf[num_states] B_rm[num_states,0] = mu_rm[num_states] B_rm[num_states,1] = sigma_rm[num_states] B_sf[num_states,0] = mu_sf[num_states] B_sf[num_states,1] = sigma_sf[num_states] B_sm[num_states,0] = mu_sm[num_states] B_sm[num_states,1] = sigma_sm[num_states] B_rf = B_rf.tolist() B_rm = B_rm.tolist() B_sf = B_sf.tolist() B_sm = B_sm.tolist() # pi - initial probabilities per state pi = [0.1] * 10 # generate RF, RM, SF, SM models from parameters model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained total_seq = Fmat[0:121,:] # For Training total_seq_rf = total_seq[0:121,1:35] total_seq_rm = total_seq[0:121,36:70] total_seq_sf = total_seq[0:121,71:105] total_seq_sm = total_seq[0:121,106:140] train_seq_rf = (np.array(total_seq_rf).T).tolist() train_seq_rm = (np.array(total_seq_rm).T).tolist() train_seq_sf = (np.array(total_seq_sf).T).tolist() train_seq_sm = (np.array(total_seq_sm).T).tolist() final_ts_rf = ghmm.SequenceSet(F,train_seq_rf) final_ts_rm = ghmm.SequenceSet(F,train_seq_rm) final_ts_sf = ghmm.SequenceSet(F,train_seq_sf) final_ts_sm = ghmm.SequenceSet(F,train_seq_sm) model_rf.baumWelch(final_ts_rf) model_rm.baumWelch(final_ts_rm) model_sf.baumWelch(final_ts_sf) model_sm.baumWelch(final_ts_sm) # Gather Data from Robot Online index = 0 frame = 0 max_force = 0 format = 'png' fmags = np.zeros(22) contact_point_world = [0,0,0] time_varying_data = [0] time_instant_contact_data = [0,0,0] time_varying_contact_data = [0,0,0] time_varying_tracker = [0,0] FLAG_Trunk = False FLAG_Leaf = False FLAG_Trunk_List = [False] FLAG_Leaf_List = [False] getdata() #getpics() #tracking_point()
mit
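The script above derives each model's Gaussian emission parameters with feature_to_mu_sigma(), which splits a force trace into ten contiguous chunks, one per left-to-right hidden state. A numpy-only sketch of that step (the function name and the synthetic trace below are mine, not part of the file, and ghmm is not required):

import numpy as np

def chunk_mu_sigma(series, n_states=10):
    # one (mu, sigma) pair per hidden state, taken from contiguous chunks
    chunks = np.array_split(np.asarray(series, dtype=float), n_states)
    mu = np.array([c.mean() for c in chunks])
    sigma = np.array([c.std() for c in chunks])
    return mu, sigma

# a fabricated force profile standing in for one column of Fmat
force_profile = 5.0 * np.abs(np.sin(np.linspace(0.0, np.pi, 120)))
mu, sigma = chunk_mu_sigma(force_profile)
print(np.round(mu, 3))
print(np.round(sigma, 3))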
cactusbin/nyt
matplotlib/lib/matplotlib/tests/test_collections.py
3
13719
""" Tests specific to the collections module. """ from nose.tools import assert_equal import numpy as np from numpy.testing import assert_array_equal import matplotlib.pyplot as plt import matplotlib.collections as mcollections import matplotlib.transforms as mtransforms from matplotlib.collections import EventCollection from matplotlib.testing.decorators import cleanup, image_comparison def generate_EventCollection_plot(): ''' generate the initial collection and plot it ''' positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.]) extra_positions = np.array([34., 55., 89.]) orientation = 'horizontal' lineoffset = 1 linelength = .5 linewidth = 2 color = [1, 0, 0, 1] linestyle = 'solid' antialiased = True coll = EventCollection(positions, orientation=orientation, lineoffset=lineoffset, linelength=linelength, linewidth=linewidth, color=color, linestyle=linestyle, antialiased=antialiased ) fig = plt.figure() splt = fig.add_subplot(1, 1, 1) splt.add_collection(coll) splt.set_title('EventCollection: default') props = {'positions': positions, 'extra_positions': extra_positions, 'orientation': orientation, 'lineoffset': lineoffset, 'linelength': linelength, 'linewidth': linewidth, 'color': color, 'linestyle': linestyle, 'antialiased': antialiased } splt.set_xlim(-1, 22) splt.set_ylim(0, 2) return splt, coll, props @image_comparison(baseline_images=['EventCollection_plot__default']) def test__EventCollection__get_segments(): ''' check to make sure the default segments have the correct coordinates ''' _, coll, props = generate_EventCollection_plot() check_segments(coll, props['positions'], props['linelength'], props['lineoffset'], props['orientation']) @cleanup def test__EventCollection__get_positions(): ''' check to make sure the default positions match the input positions ''' _, coll, props = generate_EventCollection_plot() np.testing.assert_array_equal(props['positions'], coll.get_positions()) @cleanup def test__EventCollection__get_orientation(): ''' check to make sure the default orientation matches the input orientation ''' _, coll, props = generate_EventCollection_plot() assert_equal(props['orientation'], coll.get_orientation()) @cleanup def test__EventCollection__is_horizontal(): ''' check to make sure the default orientation matches the input orientation ''' _, coll, _ = generate_EventCollection_plot() assert_equal(True, coll.is_horizontal()) @cleanup def test__EventCollection__get_linelength(): ''' check to make sure the default linelength matches the input linelength ''' _, coll, props = generate_EventCollection_plot() assert_equal(props['linelength'], coll.get_linelength()) @cleanup def test__EventCollection__get_lineoffset(): ''' check to make sure the default lineoffset matches the input lineoffset ''' _, coll, props = generate_EventCollection_plot() assert_equal(props['lineoffset'], coll.get_lineoffset()) @cleanup def test__EventCollection__get_linestyle(): ''' check to make sure the default linestyle matches the input linestyle ''' _, coll, _ = generate_EventCollection_plot() assert_equal(coll.get_linestyle(), [(None, None)]) @cleanup def test__EventCollection__get_color(): ''' check to make sure the default color matches the input color ''' _, coll, props = generate_EventCollection_plot() np.testing.assert_array_equal(props['color'], coll.get_color()) check_allprop_array(coll.get_colors(), props['color']) @image_comparison(baseline_images=['EventCollection_plot__set_positions']) def test__EventCollection__set_positions(): ''' check to make sure set_positions works properly ''' splt, 
coll, props = generate_EventCollection_plot() new_positions = np.hstack([props['positions'], props['extra_positions']]) coll.set_positions(new_positions) np.testing.assert_array_equal(new_positions, coll.get_positions()) check_segments(coll, new_positions, props['linelength'], props['lineoffset'], props['orientation']) splt.set_title('EventCollection: set_positions') splt.set_xlim(-1, 90) @image_comparison(baseline_images=['EventCollection_plot__add_positions']) def test__EventCollection__add_positions(): ''' check to make sure add_positions works properly ''' splt, coll, props = generate_EventCollection_plot() new_positions = np.hstack([props['positions'], props['extra_positions'][0]]) coll.add_positions(props['extra_positions'][0]) np.testing.assert_array_equal(new_positions, coll.get_positions()) check_segments(coll, new_positions, props['linelength'], props['lineoffset'], props['orientation']) splt.set_title('EventCollection: add_positions') splt.set_xlim(-1, 35) @image_comparison(baseline_images=['EventCollection_plot__append_positions']) def test__EventCollection__append_positions(): ''' check to make sure append_positions works properly ''' splt, coll, props = generate_EventCollection_plot() new_positions = np.hstack([props['positions'], props['extra_positions'][2]]) coll.append_positions(props['extra_positions'][2]) np.testing.assert_array_equal(new_positions, coll.get_positions()) check_segments(coll, new_positions, props['linelength'], props['lineoffset'], props['orientation']) splt.set_title('EventCollection: append_positions') splt.set_xlim(-1, 90) @image_comparison(baseline_images=['EventCollection_plot__extend_positions']) def test__EventCollection__extend_positions(): ''' check to make sure extend_positions works properly ''' splt, coll, props = generate_EventCollection_plot() new_positions = np.hstack([props['positions'], props['extra_positions'][1:]]) coll.extend_positions(props['extra_positions'][1:]) np.testing.assert_array_equal(new_positions, coll.get_positions()) check_segments(coll, new_positions, props['linelength'], props['lineoffset'], props['orientation']) splt.set_title('EventCollection: extend_positions') splt.set_xlim(-1, 90) @image_comparison(baseline_images=['EventCollection_plot__switch_orientation']) def test__EventCollection__switch_orientation(): ''' check to make sure switch_orientation works properly ''' splt, coll, props = generate_EventCollection_plot() new_orientation = 'vertical' coll.switch_orientation() assert_equal(new_orientation, coll.get_orientation()) assert_equal(False, coll.is_horizontal()) new_positions = coll.get_positions() check_segments(coll, new_positions, props['linelength'], props['lineoffset'], new_orientation) splt.set_title('EventCollection: switch_orientation') splt.set_ylim(-1, 22) splt.set_xlim(0, 2) @image_comparison(baseline_images= ['EventCollection_plot__switch_orientation__2x']) def test__EventCollection__switch_orientation_2x(): ''' check to make sure calling switch_orientation twice sets the orientation back to the default ''' splt, coll, props = generate_EventCollection_plot() coll.switch_orientation() coll.switch_orientation() new_positions = coll.get_positions() assert_equal(props['orientation'], coll.get_orientation()) assert_equal(True, coll.is_horizontal()) np.testing.assert_array_equal(props['positions'], new_positions) check_segments(coll, new_positions, props['linelength'], props['lineoffset'], props['orientation']) splt.set_title('EventCollection: switch_orientation 2x') 
@image_comparison(baseline_images=['EventCollection_plot__set_orientation']) def test__EventCollection__set_orientation(): ''' check to make sure set_orientation works properly ''' splt, coll, props = generate_EventCollection_plot() new_orientation = 'vertical' coll.set_orientation(new_orientation) assert_equal(new_orientation, coll.get_orientation()) assert_equal(False, coll.is_horizontal()) check_segments(coll, props['positions'], props['linelength'], props['lineoffset'], new_orientation) splt.set_title('EventCollection: set_orientation') splt.set_ylim(-1, 22) splt.set_xlim(0, 2) @image_comparison(baseline_images=['EventCollection_plot__set_linelength']) def test__EventCollection__set_linelength(): ''' check to make sure set_linelength works properly ''' splt, coll, props = generate_EventCollection_plot() new_linelength = 15 coll.set_linelength(new_linelength) assert_equal(new_linelength, coll.get_linelength()) check_segments(coll, props['positions'], new_linelength, props['lineoffset'], props['orientation']) splt.set_title('EventCollection: set_linelength') splt.set_ylim(-20, 20) @image_comparison(baseline_images=['EventCollection_plot__set_lineoffset']) def test__EventCollection__set_lineoffset(): ''' check to make sure set_lineoffset works properly ''' splt, coll, props = generate_EventCollection_plot() new_lineoffset = -5. coll.set_lineoffset(new_lineoffset) assert_equal(new_lineoffset, coll.get_lineoffset()) check_segments(coll, props['positions'], props['linelength'], new_lineoffset, props['orientation']) splt.set_title('EventCollection: set_lineoffset') splt.set_ylim(-6, -4) @image_comparison(baseline_images=['EventCollection_plot__set_linestyle']) def test__EventCollection__set_linestyle(): ''' check to make sure set_linestyle works properly ''' splt, coll, _ = generate_EventCollection_plot() new_linestyle = 'dashed' coll.set_linestyle(new_linestyle) assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))]) splt.set_title('EventCollection: set_linestyle') @image_comparison(baseline_images=['EventCollection_plot__set_linewidth']) def test__EventCollection__set_linewidth(): ''' check to make sure set_linestyle works properly ''' splt, coll, _ = generate_EventCollection_plot() new_linewidth = 5 coll.set_linewidth(new_linewidth) assert_equal(coll.get_linewidth(), new_linewidth) splt.set_title('EventCollection: set_linewidth') @image_comparison(baseline_images=['EventCollection_plot__set_color']) def test__EventCollection__set_color(): ''' check to make sure set_color works properly ''' splt, coll, _ = generate_EventCollection_plot() new_color = np.array([0, 1, 1, 1]) coll.set_color(new_color) np.testing.assert_array_equal(new_color, coll.get_color()) check_allprop_array(coll.get_colors(), new_color) splt.set_title('EventCollection: set_color') def check_segments(coll, positions, linelength, lineoffset, orientation): ''' check to make sure all values in the segment are correct, given a particular set of inputs note: this is not a test, it is used by tests ''' segments = coll.get_segments() if (orientation.lower() == 'horizontal' or orientation.lower() == 'none' or orientation is None): # if horizontal, the position in is in the y-axis pos1 = 1 pos2 = 0 elif orientation.lower() == 'vertical': # if vertical, the position in is in the x-axis pos1 = 0 pos2 = 1 else: raise ValueError("orientation must be 'horizontal' or 'vertical'") # test to make sure each segment is correct for i, segment in enumerate(segments): assert_equal(segment[0, pos1], lineoffset + linelength / 2.) 
assert_equal(segment[1, pos1], lineoffset - linelength / 2.) assert_equal(segment[0, pos2], positions[i]) assert_equal(segment[1, pos2], positions[i]) def check_allprop(values, target): ''' check to make sure all values match the given target note: this is not a test, it is used by tests ''' for value in values: assert_equal(value, target) def check_allprop_array(values, target): ''' check to make sure all values match the given target if arrays note: this is not a test, it is used by tests ''' for value in values: np.testing.assert_array_equal(value, target) def test_null_collection_datalim(): col = mcollections.PathCollection([]) col_data_lim = col.get_datalim(mtransforms.IdentityTransform()) assert_array_equal(col_data_lim.get_points(), mtransforms.Bbox([[0, 0], [0, 0]]).get_points()) if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
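# A minimal usage sketch of the EventCollection API exercised by the tests above:
# build a collection, add it to an axes, then mutate it the way the tests do.
# The positions and styling values below are arbitrary example numbers.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import EventCollection

positions = np.array([0., 1., 2., 3., 5., 8.])
coll = EventCollection(positions, orientation='horizontal',
                       lineoffset=1, linelength=0.5, color=[1, 0, 0, 1])

fig, ax = plt.subplots()
ax.add_collection(coll)
ax.set_xlim(-1, 10)
ax.set_ylim(0, 2)

coll.add_positions(13.)        # append a single event
coll.switch_orientation()      # flip to vertical (calling it twice flips back)
print(coll.get_orientation(), len(coll.get_positions()))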
unlicense
abgoswam/data-science-from-scratch
code/nearest_neighbors.py
57
7357
from __future__ import division from collections import Counter from linear_algebra import distance from statistics import mean import math, random import matplotlib.pyplot as plt def raw_majority_vote(labels): votes = Counter(labels) winner, _ = votes.most_common(1)[0] return winner def majority_vote(labels): """assumes that labels are ordered from nearest to farthest""" vote_counts = Counter(labels) winner, winner_count = vote_counts.most_common(1)[0] num_winners = len([count for count in vote_counts.values() if count == winner_count]) if num_winners == 1: return winner # unique winner, so return it else: return majority_vote(labels[:-1]) # try again without the farthest def knn_classify(k, labeled_points, new_point): """each labeled point should be a pair (point, label)""" # order the labeled points from nearest to farthest by_distance = sorted(labeled_points, key=lambda (point, _): distance(point, new_point)) # find the labels for the k closest k_nearest_labels = [label for _, label in by_distance[:k]] # and let them vote return majority_vote(k_nearest_labels) cities = [(-86.75,33.5666666666667,'Python'),(-88.25,30.6833333333333,'Python'),(-112.016666666667,33.4333333333333,'Java'),(-110.933333333333,32.1166666666667,'Java'),(-92.2333333333333,34.7333333333333,'R'),(-121.95,37.7,'R'),(-118.15,33.8166666666667,'Python'),(-118.233333333333,34.05,'Java'),(-122.316666666667,37.8166666666667,'R'),(-117.6,34.05,'Python'),(-116.533333333333,33.8166666666667,'Python'),(-121.5,38.5166666666667,'R'),(-117.166666666667,32.7333333333333,'R'),(-122.383333333333,37.6166666666667,'R'),(-121.933333333333,37.3666666666667,'R'),(-122.016666666667,36.9833333333333,'Python'),(-104.716666666667,38.8166666666667,'Python'),(-104.866666666667,39.75,'Python'),(-72.65,41.7333333333333,'R'),(-75.6,39.6666666666667,'Python'),(-77.0333333333333,38.85,'Python'),(-80.2666666666667,25.8,'Java'),(-81.3833333333333,28.55,'Java'),(-82.5333333333333,27.9666666666667,'Java'),(-84.4333333333333,33.65,'Python'),(-116.216666666667,43.5666666666667,'Python'),(-87.75,41.7833333333333,'Java'),(-86.2833333333333,39.7333333333333,'Java'),(-93.65,41.5333333333333,'Java'),(-97.4166666666667,37.65,'Java'),(-85.7333333333333,38.1833333333333,'Python'),(-90.25,29.9833333333333,'Java'),(-70.3166666666667,43.65,'R'),(-76.6666666666667,39.1833333333333,'R'),(-71.0333333333333,42.3666666666667,'R'),(-72.5333333333333,42.2,'R'),(-83.0166666666667,42.4166666666667,'Python'),(-84.6,42.7833333333333,'Python'),(-93.2166666666667,44.8833333333333,'Python'),(-90.0833333333333,32.3166666666667,'Java'),(-94.5833333333333,39.1166666666667,'Java'),(-90.3833333333333,38.75,'Python'),(-108.533333333333,45.8,'Python'),(-95.9,41.3,'Python'),(-115.166666666667,36.0833333333333,'Java'),(-71.4333333333333,42.9333333333333,'R'),(-74.1666666666667,40.7,'R'),(-106.616666666667,35.05,'Python'),(-78.7333333333333,42.9333333333333,'R'),(-73.9666666666667,40.7833333333333,'R'),(-80.9333333333333,35.2166666666667,'Python'),(-78.7833333333333,35.8666666666667,'Python'),(-100.75,46.7666666666667,'Java'),(-84.5166666666667,39.15,'Java'),(-81.85,41.4,'Java'),(-82.8833333333333,40,'Java'),(-97.6,35.4,'Python'),(-122.666666666667,45.5333333333333,'Python'),(-75.25,39.8833333333333,'Python'),(-80.2166666666667,40.5,'Python'),(-71.4333333333333,41.7333333333333,'R'),(-81.1166666666667,33.95,'R'),(-96.7333333333333,43.5666666666667,'Python'),(-90,35.05,'R'),(-86.6833333333333,36.1166666666667,'R'),(-97.7,30.3,'Python'),(-96.85,32.85,'Java'),(-95.35,29.9666666666667,'Java'),
(-98.4666666666667,29.5333333333333,'Java'),(-111.966666666667,40.7666666666667,'Python'),(-73.15,44.4666666666667,'R'),(-77.3333333333333,37.5,'Python'),(-122.3,47.5333333333333,'Python'),(-89.3333333333333,43.1333333333333,'R'),(-104.816666666667,41.15,'Java')] cities = [([longitude, latitude], language) for longitude, latitude, language in cities] def plot_state_borders(plt, color='0.8'): pass def plot_cities(): # key is language, value is pair (longitudes, latitudes) plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) } # we want each language to have a different marker and color markers = { "Java" : "o", "Python" : "s", "R" : "^" } colors = { "Java" : "r", "Python" : "b", "R" : "g" } for (longitude, latitude), language in cities: plots[language][0].append(longitude) plots[language][1].append(latitude) # create a scatter series for each language for language, (x, y) in plots.iteritems(): plt.scatter(x, y, color=colors[language], marker=markers[language], label=language, zorder=10) plot_state_borders(plt) # assume we have a function that does this plt.legend(loc=0) # let matplotlib choose the location plt.axis([-130,-60,20,55]) # set the axes plt.title("Favorite Programming Languages") plt.show() def classify_and_plot_grid(k=1): plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) } markers = { "Java" : "o", "Python" : "s", "R" : "^" } colors = { "Java" : "r", "Python" : "b", "R" : "g" } for longitude in range(-130, -60): for latitude in range(20, 55): predicted_language = knn_classify(k, cities, [longitude, latitude]) plots[predicted_language][0].append(longitude) plots[predicted_language][1].append(latitude) # create a scatter series for each language for language, (x, y) in plots.iteritems(): plt.scatter(x, y, color=colors[language], marker=markers[language], label=language, zorder=0) plot_state_borders(plt, color='black') # assume we have a function that does this plt.legend(loc=0) # let matplotlib choose the location plt.axis([-130,-60,20,55]) # set the axes plt.title(str(k) + "-Nearest Neighbor Programming Languages") plt.show() # # the curse of dimensionality # def random_point(dim): return [random.random() for _ in range(dim)] def random_distances(dim, num_pairs): return [distance(random_point(dim), random_point(dim)) for _ in range(num_pairs)] if __name__ == "__main__": # try several different values for k for k in [1, 3, 5, 7]: num_correct = 0 for location, actual_language in cities: other_cities = [other_city for other_city in cities if other_city != (location, actual_language)] predicted_language = knn_classify(k, other_cities, location) if predicted_language == actual_language: num_correct += 1 print k, "neighbor[s]:", num_correct, "correct out of", len(cities) dimensions = range(1, 101, 5) avg_distances = [] min_distances = [] random.seed(0) for dim in dimensions: distances = random_distances(dim, 10000) # 10,000 random pairs avg_distances.append(mean(distances)) # track the average min_distances.append(min(distances)) # track the minimum print dim, min(distances), mean(distances), min(distances) / mean(distances)
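# A small self-contained sketch of the same majority-vote k-NN idea implemented
# above. The module itself targets Python 2 (tuple-unpacking lambdas, print
# statements), so this sketch restates the logic in Python 3 with made-up points.
from collections import Counter
import math

def euclidean(p, q):
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))

def knn_predict(k, labeled_points, new_point):
    # order by distance, take the k nearest labels, drop the farthest on ties
    by_distance = sorted(labeled_points, key=lambda pl: euclidean(pl[0], new_point))
    labels = [label for _, label in by_distance[:k]]
    while labels:
        counts = Counter(labels)
        winner, winner_count = counts.most_common(1)[0]
        if sum(1 for c in counts.values() if c == winner_count) == 1:
            return winner
        labels = labels[:-1]  # tie: retry without the farthest neighbour

points = [([0.0, 0.0], "R"), ([1.0, 0.0], "Python"),
          ([0.0, 1.1], "R"), ([5.0, 5.0], "Java")]
print(knn_predict(3, points, [0.2, 0.3]))  # -> R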
unlicense
jaidevd/scikit-learn
sklearn/linear_model/passive_aggressive.py
28
11542
# Authors: Rob Zinkov, Mathieu Blondel # License: BSD 3 clause from .stochastic_gradient import BaseSGDClassifier from .stochastic_gradient import BaseSGDRegressor from .stochastic_gradient import DEFAULT_EPSILON class PassiveAggressiveClassifier(BaseSGDClassifier): """Passive Aggressive Classifier Read more in the :ref:`User Guide <passive_aggressive>`. Parameters ---------- C : float Maximum step size (regularization). Defaults to 1.0. fit_intercept : bool, default=False Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. loss : string, optional The loss function to be used: hinge: equivalent to PA-I in the reference paper. squared_hinge: equivalent to PA-II in the reference paper. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` .. versionadded:: 0.17 parameter *class_weight* to automatically weight samples. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. .. versionadded:: 0.19 parameter *average* to use weights averaging in SGD Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- SGDClassifier Perceptron References ---------- Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006) """ def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, loss="hinge", n_jobs=1, random_state=None, warm_start=False, class_weight=None, average=False): super(PassiveAggressiveClassifier, self).__init__( penalty=None, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, eta0=1.0, warm_start=warm_start, class_weight=class_weight, average=average, n_jobs=n_jobs) self.C = C self.loss = loss def partial_fit(self, X, y, classes=None): """Fit linear model with Passive Aggressive algorithm. 
Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of the training data y : numpy array of shape [n_samples] Subset of the target values classes : array, shape = [n_classes] Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. Returns ------- self : returns an instance of self. """ if self.class_weight == 'balanced': raise ValueError("class_weight 'balanced' is not supported for " "partial_fit. For 'balanced' weights, use " "`sklearn.utils.compute_class_weight` with " "`class_weight='balanced'`. In place of y you " "can use a large enough subset of the full " "training set target to properly estimate the " "class frequency distributions. Pass the " "resulting weights as the class_weight " "parameter.") lr = "pa1" if self.loss == "hinge" else "pa2" return self._partial_fit(X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, n_iter=1, classes=classes, sample_weight=None, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_classes,n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [n_classes] The initial intercept to warm-start the optimization. Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "hinge" else "pa2" return self._fit(X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init) class PassiveAggressiveRegressor(BaseSGDRegressor): """Passive Aggressive Regressor Read more in the :ref:`User Guide <passive_aggressive>`. Parameters ---------- C : float Maximum step size (regularization). Defaults to 1.0. epsilon : float If the difference between the current prediction and the correct label is below this threshold, the model is not updated. fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, default=True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level loss : string, optional The loss function to be used: epsilon_insensitive: equivalent to PA-I in the reference paper. squared_epsilon_insensitive: equivalent to PA-II in the reference paper. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. average : bool or int, optional When set to True, computes the averaged SGD weights and stores the result in the ``coef_`` attribute. If set to an int greater than 1, averaging will begin once the total number of samples seen reaches average. So average=10 will begin averaging after seeing 10 samples. .. 
versionadded:: 0.19 parameter *average* to use weights averaging in SGD Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- SGDRegressor References ---------- Online Passive-Aggressive Algorithms <http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf> K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006) """ def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, loss="epsilon_insensitive", epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False, average=False): super(PassiveAggressiveRegressor, self).__init__( penalty=None, l1_ratio=0, epsilon=epsilon, eta0=1.0, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, warm_start=warm_start, average=average) self.C = C self.loss = loss def partial_fit(self, X, y): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Subset of training data y : numpy array of shape [n_samples] Subset of target values Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._partial_fit(X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, n_iter=1, sample_weight=None, coef_init=None, intercept_init=None) def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : numpy array of shape [n_samples] Target values coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : returns an instance of self. """ lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._fit(X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)
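# A minimal usage sketch for the estimator documented above, assuming the older
# scikit-learn API used in this file (n_iter rather than the later max_iter).
# The toy data below is made up for illustration.
import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

X = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.1], [0.9, 1.0]])
y = np.array([0, 0, 1, 1])

clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", n_iter=5, random_state=0)
clf.fit(X, y)
print(clf.predict([[0.8, 0.9]]))  # expected: [1]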
bsd-3-clause
mfcabrera/luigi
examples/pyspark_wc.py
21
3380
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import luigi from luigi.s3 import S3Target from luigi.contrib.spark import SparkSubmitTask, PySparkTask class InlinePySparkWordCount(PySparkTask): """ This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task over the target data in :py:meth:`wordcount.input` (a file in S3) and writes the result into its :py:meth:`wordcount.output` target (a file in S3). This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`. Example luigi configuration:: [spark] spark-submit: /usr/local/spark/bin/spark-submit master: spark://spark.example.org:7077 # py-packages: numpy, pandas """ driver_memory = '2g' executor_memory = '3g' def input(self): return S3Target("s3n://bucket.example.org/wordcount.input") def output(self): return S3Target('s3n://bucket.example.org/wordcount.output') def main(self, sc, *args): sc.textFile(self.input().path) \ .flatMap(lambda line: line.split()) \ .map(lambda word: (word, 1)) \ .reduceByKey(lambda a, b: a + b) \ .saveAsTextFile(self.output().path) class PySparkWordCount(SparkSubmitTask): """ This task is the same as :py:class:`InlinePySparkWordCount` above but uses an external python driver file specified in :py:meth:`app` It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task over the target data in :py:meth:`wordcount.input` (a file in S3) and writes the result into its :py:meth:`wordcount.output` target (a file in S3). This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`. Example luigi configuration:: [spark] spark-submit: /usr/local/spark/bin/spark-submit master: spark://spark.example.org:7077 deploy-mode: client """ driver_memory = '2g' executor_memory = '3g' total_executor_cores = luigi.IntParameter(default=100, significant=False) name = "PySpark Word Count" app = 'wordcount.py' def app_options(self): # These are passed to the Spark main args in the defined order. return [self.input().path, self.output().path] def input(self): return S3Target("s3n://bucket.example.org/wordcount.input") def output(self): return S3Target('s3n://bucket.example.org/wordcount.output') ''' // Corresponding example Spark Job, running Word count with Spark's Python API // This file would have to be saved into wordcount.py import sys from pyspark import SparkContext if __name__ == "__main__": sc = SparkContext() sc.textFile(sys.argv[1]) \ .flatMap(lambda line: line.split()) \ .map(lambda word: (word, 1)) \ .reduceByKey(lambda a, b: a + b) \ .saveAsTextFile(sys.argv[2]) '''
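# A sketch of how one of these tasks might be launched, assuming a configured
# [spark] section and valid S3 credentials. The import path below is hypothetical
# and depends on where this file lives on the PYTHONPATH.
#
#   luigi --module pyspark_wc InlinePySparkWordCount --local-scheduler
#
# or programmatically:
import luigi
from pyspark_wc import InlinePySparkWordCount  # hypothetical module name

if __name__ == '__main__':
    luigi.build([InlinePySparkWordCount()], local_scheduler=True)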
apache-2.0
DHI-GRAS/processing_SWAT
ASS_module1_PrepData.py
2
10910
""" *************************************************************************** ASS_module1_PrepData.py ------------------------------------- Copyright (C) 2014 TIGER-NET (www.tiger-net.org) *************************************************************************** * This plugin is part of the Water Observation Information System (WOIS) * * developed under the TIGER-NET project funded by the European Space * * Agency as part of the long-term TIGER initiative aiming at promoting * * the use of Earth Observation (EO) for improved Integrated Water * * Resources Management (IWRM) in Africa. * * * * WOIS is a free software i.e. you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published * * by the Free Software Foundation, either version 3 of the License, * * or (at your option) any later version. * * * * WOIS is distributed in the hope that it will be useful, but WITHOUT ANY * * WARRANTY; without even the implied warranty of MERCHANTABILITY or * * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * * for more details. * * * * You should have received a copy of the GNU General Public License along * * with this program. If not, see <http://www.gnu.org/licenses/>. * *************************************************************************** """ # Import modules from matplotlib.pylab import * import numpy import os import csv from datetime import date, timedelta from ASS_utilities import EstimateLosses def MakedrainsTo(src_folder): """Creating drainsTo from the fig.fig file""" filename = src_folder + os.sep + 'fig.fig' watershed_file = open(filename,'r').readlines() routes = {} add = {} for l in watershed_file: try: if int(l[11:16]) == 2: routes[int(l[16:22])] = [int(l[22:28]),int(l[28:34])] elif int(l[11:16]) == 5: add[int(l[16:22])] = [int(l[22:28]),int(l[28:34])] except: pass nbrch = min(routes.keys())-1 #The total number of reaches in the basin catch = {} for c in routes.keys(): reach = routes[c][0] inflow = routes[c][1] catch[reach] = [inflow] if reach == inflow: pass else: j = [1] while j: new_inflow = [] for n in catch[reach]: if n <= nbrch: new_inflow.append(n) else: try: for m in add[n]: new_inflow.append(m) except: new_inflow.append(routes[n][1]) catch[reach] = new_inflow j = [i for i in catch[reach] if i > nbrch] catchs=sorted(catch, key=lambda k: len(catch[k])) drainsTo_dict = {} for i in range(1,nbrch+1): tempIndex = [] for k in catchs: for m in range(0,len(catch[k])): if catch[k][m] == i: tempIndex.append(k) else: pass drainsTo_dict[i] = tempIndex drainsTo = numpy.zeros([nbrch]) for j in range (0,nbrch): if len(drainsTo_dict[j+1])>1: drainsTo[j] = (int(drainsTo_dict[j+1][1])) else: drainsTo[j] = 0 #outlets are assigned 0 return(drainsTo) def GetMuskingumParameters(nbrch,src_folder): """Getting the Muskingum parameters from the .rte and basins.bsn files""" #Getting the lengths, widths, depths, slope and Manning n of the reaches from the .rte files width = numpy.zeros([nbrch]) length = numpy.zeros([nbrch]) depth = numpy.zeros([nbrch]) slope = numpy.zeros([nbrch]) manning_n = numpy.zeros([nbrch]) for rch in range(1,nbrch+1): filename = src_folder + os.sep + str(rch).zfill(5).ljust(9,'0') +'.rte' if os.path.isfile(filename): rte_file = open(filename,'r') rte = rte_file.readlines() rte_file.close width[rch-1] = float(rte[1][0:20]) #Average width of main channel at top of bank [m] length[rch-1] = float(rte[4][0:20]) #Average length of main channel [km] depth[rch-1] = float(rte[2][0:20]) #Main channel depth, from top of 
bank to bottom [m] slope[rch-1] = float(rte[3][0:20]) #Main channel slope [m/m] manning_n[rch-1] = float(rte[5][0:20]) #Manning's nvalue for main channel #Getting the Muskingum parameters from the basins.bsn file if os.path.isfile(src_folder + os.sep + 'basins.bsn'): bsn_file = open(src_folder + os.sep + 'basins.bsn','r') bsn = bsn_file.readlines() bsn_file.close MSK_CO1 = float(bsn[58][0:20]) #Calibration coefficient used to control impact of the storage time constant (Km) for normal flow MSK_CO2 = float(bsn[59][0:20]) #Calibration coefficient used to control impact of the storage time constant (Km) for low flow MSK_X = float(bsn[60][0:20]) z_ch = 2. #p. 429 in the SWAT documentation width_btm = width -2*2*depth #Calculation the bottom withs for i in range(0,len(width_btm)): if width_btm[i]<=0: width_btm[i] = 0.5*width[i] else: pass depth_add = 1. #Depth added to bottom (after Milzow, 2010) A_ch = width_btm*depth+z_ch*depth*depth+depth_add*width_btm/2. #Cross sectional area of flow in the channel for modified channel geometry (see Milzow, 2010) A_ch_01 = width_btm*0.1*depth+z_ch*0.1*depth*0.1*depth+depth_add*width_btm/2. #Cross sectional area when flow is 0.1 bankfull for modified channel geometry (see Milzow, 2010) ## A_ch = (width_btm+z_ch*depth)*depth+0.5*width_btm #Cross sectional area of flow in the channel ## A_ch_01 = (width_btm+z_ch*0.1*depth)*0.1*depth+0.5*width_btm #Cross sectional area when flow is 0.1 bankfull P_ch = (width_btm*width_btm+(depth_add*z_ch)**2)**0.5+2*depth*(1+z_ch**2)**0.5 #Wetted perimeter of the channel for modified channel geometry (see Milzow, 2010) P_ch_01 = (width_btm*width_btm+(depth_add*z_ch)**2)**0.5+2*0.1*depth*(1+z_ch**2)**0.5 #Wetted perimeter of the channel when flow is 0.1 bankfull for modified channel geometry (see Milzow, 2010) R_ch = A_ch/P_ch #Hydraulic radius for bankfull flow R_ch_01 = A_ch_01/P_ch_01 #Hydraulic radius for 0.1 bankfull flow c_k = (5.0/3.0)*((R_ch**(2.0/3.0)*slope**(1.0/2.0))/manning_n) #Celerity for bankfull c_k_01 = (5.0/3.0)*((R_ch_01**(2.0/3.0)*slope**(1.0/2.0))/manning_n) #Celerity for 0.1 bankfull K_1 = 1000*length/c_k/86400 #K for bankfull in days K_01 = 1000*length/c_k_01/86400 #K for 0.1 bankfull in days msk1 = MSK_CO1/(MSK_CO1+MSK_CO2) # (msk_co1 + msk_co2 = 1.) See line 130 in rtmusk.f in SWAT source code msk2 = MSK_CO2/(MSK_CO1+MSK_CO2) MSK_CO1 = msk1 MSK_CO2 = msk2 K = (MSK_CO1*K_1+MSK_CO2*K_01) X = numpy.ones([nbrch])*MSK_X return(X,K) def GetInput(src_folder,nbrch,header,Startdate,Enddate): """Getting the runoff from the output.sub file""" filename = src_folder + os.sep + 'output.sub' deli= [6,4,9,6,10,10,10,10,10,10,10,10,10,10,10,10,10,10] area = numpy.genfromtxt(filename, delimiter=deli, skip_header = header, usecols=4) data = numpy.genfromtxt(filename, delimiter=deli, skip_header = header, usecols=13) totsub = nbrch #Total number of subbasins in the SWAT model days = int(Enddate-Startdate)+1 #Number of days for which the data should be read. 
Has to be <= than the total number of days simulated WYLD_Qmm = numpy.zeros([days,totsub]) for p in range(0,totsub): for i in range(0,days): WYLD_Qmm[i,p] = data[i*totsub+(p)] #Get WYLD in m3/day WYLD_Q = numpy.zeros([days,totsub]) RR = numpy.zeros([days,totsub]) for i in range(0,totsub): WYLD_Q[:,i] = WYLD_Qmm[:,i]*area[i]*1000/86400 #WYLD_Q in m3/s RR = WYLD_Q return(RR) def DefaultErrorModel(nbrch): """Create defaults for the error model""" alphaerr = numpy.ones([nbrch])*-99.0 q = identity(nbrch)*-99.0 return(alphaerr,q) def CreateTextFiles(nbrch, src_folder, Ass_folder, header, Startdate, Enddate): #Creating text files - Assimilation file and runoff files. Reach = arange(1,nbrch+1,1) (X,K) = GetMuskingumParameters(nbrch,src_folder) (drainsTo) = MakedrainsTo(src_folder) Runoff = [] for i in range(1,nbrch+1): Runoff.append('runoff' + str(i) +'.txt') (LOSS) = EstimateLosses(src_folder,Startdate,Enddate,nbrch) (alphaerr,q) = DefaultErrorModel(nbrch) with open(Ass_folder + os.sep + 'Assimilationfile.txt', 'wb') as csvfile: file_writer = csv.writer(csvfile, delimiter=' ') file_writer.writerow(['Reach'] + ['X'] + ['K'] + ['DrainsTo'] + ['Runoff'] +['alphaerr'] + ['Loss_fraction']) for j in range(0,nbrch): file_writer.writerow([str(Reach[j])]+ [str(X[j])]+[str(K[j])[0:10]]+[str(drainsTo[j])] +[str(Runoff[j])]+[str(alphaerr[j])] + [str(LOSS[j])]) with open(Ass_folder + os.sep + 'Assimilationfile_q.txt', 'wb') as csvfile: file_writer = csv.writer(csvfile, delimiter=' ') file_writer.writerow(['q']) for k in range(0,nbrch): file_writer.writerow(q[k]) (RR) = GetInput(src_folder,nbrch,header,Startdate,Enddate) days = int(Enddate - Startdate)+1 #Number of days for which the data should be read. startdate = num2date(Startdate) dates = [startdate + timedelta(days=i) for i in range(0,days)] RR_reach = numpy.zeros([days]) for j in range(0,nbrch): RR_reach = RR[0:days,j] f = open(Ass_folder + os.sep + 'runoff' + str(j+1) + ".txt", "w") f.write('Dates' + ' ' + 'Runoff [m3/s]'+ '\n') for k in range(0,len(RR_reach)): f.write(str(dates[k])[0:10]+ ' ' +str(RR_reach[k])+'\n') f.close
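# A standalone numerical check of the storage-time formula used in
# GetMuskingumParameters above: kinematic-wave celerity
# c_k = (5/3) * R**(2/3) * sqrt(S) / n and K = 1000 * L / c_k / 86400 [days].
# The channel geometry numbers below are made up for illustration.
length_km = 12.0        # main channel length [km]
slope = 0.002           # main channel slope [m/m]
manning_n = 0.035       # Manning's n for the main channel
hydraulic_radius = 1.5  # hydraulic radius for bankfull flow [m]

c_k = (5.0 / 3.0) * (hydraulic_radius ** (2.0 / 3.0) * slope ** 0.5) / manning_n
K_days = 1000.0 * length_km / c_k / 86400.0
print("celerity [m/s]:", round(c_k, 3), "K [days]:", round(K_days, 3))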
gpl-3.0
CforED/Machine-Learning
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================

Because each dimension contains only a few points, and linear regression
follows those points with a straight line as closely as it can, noise in the
observations causes large variance in the fit, as the first plot shows. The
slope of every fitted line can vary considerably from one draw to the next
because of the noise in the observations.

Ridge regression minimizes a penalised version of the least-squares objective;
the penalty shrinks the regression coefficients. Despite the few data points
in each dimension, the slope of the prediction is much more stable, and the
variance of the fitted line is greatly reduced compared with standard linear
regression.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

np.random.seed(0)

classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fignum = 1
for name, clf in classifiers.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)

    ax = plt.axes([.12, .12, .8, .8])

    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)

    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)
    fignum += 1

plt.show()
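# A short companion sketch of the shrinkage described in the docstring: on the
# same two-point training data, increasing alpha pulls the ridge coefficient
# towards zero relative to ordinary least squares.
import numpy as np
from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]

print("ols  ", linear_model.LinearRegression().fit(X_train, y_train).coef_)
for alpha in (0.1, 1.0, 10.0):
    ridge = linear_model.Ridge(alpha=alpha).fit(X_train, y_train)
    print("ridge", alpha, ridge.coef_)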
bsd-3-clause
yonglehou/scikit-learn
examples/cluster/plot_kmeans_assumptions.py
270
2040
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means will produce unintuitive and possibly unexpected clusters. In the first three plots, the input data does not conform to some implicit assumption that k-means makes and undesirable clusters are produced as a result. In the last plot, k-means returns intuitive clusters despite unevenly sized blobs. """ print(__doc__) # Author: Phil Roth <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.datasets import make_blobs plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") plt.show()
bsd-3-clause
jblackburne/scikit-learn
examples/text/document_classification_20newsgroups.py
37
10499
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features and demonstrates various classifiers that can efficiently handle sparse matrices. The dataset used in this example is the 20 newsgroups dataset. It will be automatically downloaded, then cached. The bar plot indicates the accuracy, training time (normalized) and test time (normalized) of each classifier. """ # Author: Peter Prettenhofer <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Lars Buitinck # License: BSD 3 clause from __future__ import print_function import logging import numpy as np from optparse import OptionParser import sys from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import RidgeClassifier from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.linear_model import Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import NearestCentroid from sklearn.ensemble import RandomForestClassifier from sklearn.utils.extmath import density from sklearn import metrics # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--report", action="store_true", dest="print_report", help="Print a detailed classification report.") op.add_option("--chi2_select", action="store", type="int", dest="select_chi2", help="Select some number of features using a chi-squared test") op.add_option("--confusion_matrix", action="store_true", dest="print_cm", help="Print the confusion matrix.") op.add_option("--top10", action="store_true", dest="print_top10", help="Print ten most discriminative terms per class" " for every classifier.") op.add_option("--all_categories", action="store_true", dest="all_categories", help="Whether to use all categories or not.") op.add_option("--use_hashing", action="store_true", help="Use a hashing vectorizer.") op.add_option("--n_features", action="store", type=int, default=2 ** 16, help="n_features when using the hashing vectorizer.") op.add_option("--filtered", action="store_true", help="Remove newsgroup information that is easily overfit: " "headers, signatures, and quoting.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) print(__doc__) op.print_help() print() ############################################################################### # Load some categories from the training set if opts.all_categories: categories = None else: categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] if opts.filtered: remove = ('headers', 'footers', 'quotes') else: remove = () print("Loading 20 newsgroups dataset for categories:") print(categories if categories else "all") data_train = 
fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42, remove=remove) print('data loaded') categories = data_train.target_names # for case categories == None def size_mb(docs): return sum(len(s.encode('utf-8')) for s in docs) / 1e6 data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print("%d documents - %0.3fMB (training set)" % ( len(data_train.data), data_train_size_mb)) print("%d documents - %0.3fMB (test set)" % ( len(data_test.data), data_test_size_mb)) print("%d categories" % len(categories)) print() # split a training set and a test set y_train, y_test = data_train.target, data_test.target print("Extracting features from the training data using a sparse vectorizer") t0 = time() if opts.use_hashing: vectorizer = HashingVectorizer(stop_words='english', non_negative=True, n_features=opts.n_features) X_train = vectorizer.transform(data_train.data) else: vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') X_train = vectorizer.fit_transform(data_train.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_train.shape) print() print("Extracting features from the test data using the same vectorizer") t0 = time() X_test = vectorizer.transform(data_test.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_test.shape) print() # mapping from integer feature name to original token string if opts.use_hashing: feature_names = None else: feature_names = vectorizer.get_feature_names() if opts.select_chi2: print("Extracting %d best features by a chi-squared test" % opts.select_chi2) t0 = time() ch2 = SelectKBest(chi2, k=opts.select_chi2) X_train = ch2.fit_transform(X_train, y_train) X_test = ch2.transform(X_test) if feature_names: # keep selected feature names feature_names = [feature_names[i] for i in ch2.get_support(indices=True)] print("done in %fs" % (time() - t0)) print() if feature_names: feature_names = np.asarray(feature_names) def trim(s): """Trim string to fit on terminal (assuming 80-column display)""" return s if len(s) <= 80 else s[:77] + "..." 
############################################################################### # Benchmark classifiers def benchmark(clf): print('_' * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print("train time: %0.3fs" % train_time) t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print("test time: %0.3fs" % test_time) score = metrics.accuracy_score(y_test, pred) print("accuracy: %0.3f" % score) if hasattr(clf, 'coef_'): print("dimensionality: %d" % clf.coef_.shape[1]) print("density: %f" % density(clf.coef_)) if opts.print_top10 and feature_names is not None: print("top 10 keywords per class:") for i, category in enumerate(categories): top10 = np.argsort(clf.coef_[i])[-10:] print(trim("%s: %s" % (category, " ".join(feature_names[top10])))) print() if opts.print_report: print("classification report:") print(metrics.classification_report(y_test, pred, target_names=categories)) if opts.print_cm: print("confusion matrix:") print(metrics.confusion_matrix(y_test, pred)) print() clf_descr = str(clf).split('(')[0] return clf_descr, score, train_time, test_time results = [] for clf, name in ( (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"), (Perceptron(n_iter=50), "Perceptron"), (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"), (KNeighborsClassifier(n_neighbors=10), "kNN"), (RandomForestClassifier(n_estimators=100), "Random forest")): print('=' * 80) print(name) results.append(benchmark(clf)) for penalty in ["l2", "l1"]: print('=' * 80) print("%s penalty" % penalty.upper()) # Train Liblinear model results.append(benchmark(LinearSVC(loss='l2', penalty=penalty, dual=False, tol=1e-3))) # Train SGD model results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty))) # Train SGD with Elastic Net penalty print('=' * 80) print("Elastic-Net penalty") results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"))) # Train NearestCentroid without threshold print('=' * 80) print("NearestCentroid (aka Rocchio classifier)") results.append(benchmark(NearestCentroid())) # Train sparse Naive Bayes classifiers print('=' * 80) print("Naive Bayes") results.append(benchmark(MultinomialNB(alpha=.01))) results.append(benchmark(BernoulliNB(alpha=.01))) print('=' * 80) print("LinearSVC with L1-based feature selection") # The smaller C, the stronger the regularization. # The more regularization, the more sparsity. results.append(benchmark(Pipeline([ ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)), ('classification', LinearSVC()) ]))) # make some plots indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) / np.max(training_time) test_time = np.array(test_time) / np.max(test_time) plt.figure(figsize=(12, 8)) plt.title("Score") plt.barh(indices, score, .2, label="score", color='navy') plt.barh(indices + .3, training_time, .2, label="training time", color='c') plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange') plt.yticks(()) plt.legend(loc='best') plt.subplots_adjust(left=.25) plt.subplots_adjust(top=.95) plt.subplots_adjust(bottom=.05) for i, c in zip(indices, clf_names): plt.text(-.3, i, c) plt.show()
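# A stripped-down version of the benchmark above for a single model, assuming
# network access to download the 20 newsgroups data; the two categories chosen
# here are arbitrary.
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import RidgeClassifier
from sklearn import metrics

cats = ['sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=cats)
test = fetch_20newsgroups(subset='test', categories=cats)

vec = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vec.fit_transform(train.data)
X_test = vec.transform(test.data)

clf = RidgeClassifier(tol=1e-2, solver="lsqr").fit(X_train, train.target)
print(metrics.accuracy_score(test.target, clf.predict(X_test)))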
bsd-3-clause
osPlanning/activitysim
activitysim/defaults/datasources.py
1
1784
import numpy as np
import pandas as pd
import os
import uuid
import yaml

from urbansim.utils import misc
import urbansim.sim.simulation as sim

from .. import activitysim as asim

import warnings

warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
pd.options.mode.chained_assignment = None


@sim.injectable('settings', cache=True)
def settings():
    with open(os.path.join(misc.configs_dir(), "settings.yaml")) as f:
        settings = yaml.load(f)
        # monkey patch on the settings object since it's pretty global
        # but will also be available as injectable
        sim.settings = settings
        return settings


@sim.injectable('run_number')
def run_number():
    return misc.get_run_number()


@sim.injectable('uuid', cache=True)
def uuid_hex():
    return uuid.uuid4().hex


@sim.injectable('store', cache=True)
def hdfstore(settings):
    return pd.HDFStore(
        os.path.join(misc.data_dir(), settings["store"]),
        mode='r')


@sim.injectable("scenario")
def scenario(settings):
    return settings["scenario"]


@sim.table(cache=True)
def land_use(store):
    return store["land_use/taz_data"]


@sim.table(cache=True)
def accessibility(store):
    df = store["skims/accessibility"]
    df.columns = [c.upper() for c in df.columns]
    return df


@sim.table(cache=True)
def households(store, settings):
    if "households_sample_size" in settings:
        return asim.random_rows(store["households"],
                                settings["households_sample_size"])
    return store["households"]


@sim.table(cache=True)
def persons(store):
    return store["persons"]


sim.broadcast('land_use', 'households', cast_index=True, onto_on='TAZ')
sim.broadcast('accessibility', 'households', cast_index=True, onto_on='TAZ')
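# A minimal sketch of how these registrations are typically consumed elsewhere
# in the pipeline, assuming this module has already been imported so the
# injectables and tables are registered, a configs/settings.yaml exists, and
# the HDF5 data store is on disk. The accessor names follow the orca-style
# urbansim.sim.simulation API.
import urbansim.sim.simulation as sim

settings = sim.get_injectable("settings")            # parsed settings.yaml
households = sim.get_table("households").to_frame()  # lazily loaded table
print(settings.get("scenario"), len(households))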
bsd-3-clause
hunse/cuda-convnet2
translation-invariance.py
1
2856
""" See how translation-invariant features in a network are """ import numpy as np from run_core import load_network from run_numpy import compute_target_layer if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description="Run network in Numpy") parser.add_argument('loadfile', help="Checkpoint to load") parser.add_argument('--n', type=int, help="Number of images to test") args = parser.parse_args() layers, data, dp = load_network(args.loadfile) # --- get data bidx = 0 assert dp.batches_generated > bidx n_images = 1000 r = 2 nc, nx, ny = dp.num_colors, dp.img_size, dp.inner_size assert (nx - ny) % 2 == 0 p = (nx - ny) / 2 assert r <= p raw = dp.data_dic[bidx]['data'].astype('float32').T.reshape(-1, nc, nx, nx) labels = dp.data_dic[bidx]['labels'].astype('int32') raw, labels = raw[:n_images], labels[:n_images] irange, jrange = np.arange(-r, r+1), np.arange(-r, r+1) ni, nj = len(irange), len(jrange) nij = ni * nj images = np.zeros((raw.shape[0], ni, nj, nc, ny, ny), dtype=raw.dtype) for ii, i in enumerate(irange): for jj, j in enumerate(jrange): images[:, ii, jj, :, :, :] = raw[:, :, p+i:p+i+ny, p+j:p+j+ny] del raw images = images.reshape(-1, nc, ny, ny) images -= dp.data_mean.T.reshape(1, nc, ny, ny) labels = labels.repeat(nij) # import matplotlib.pyplot as plt # from hunse_tools.plotting import tile # tile(np.transpose(images, (0, 2, 3, 1)) / 255., rows=10, cols=nij) # plt.show() data = [images, labels] # print(data[0].shape, data[1].shape) # --- compute outputs outputs = compute_target_layer('logprob', layers, data) display_layers = [name for name, layer in layers.items() if layer['type'] in ('fc', 'conv', 'local', 'pool', 'neuron')] for name in sorted(display_layers): output = outputs[name] output = output.reshape(n_images, ni, nj, -1) # subtract out centre output = output - output[:, r:r+1, r:r+1, :] # vertical invariance for each feature inv_v = output.std(axis=1) # inv_v[(output == 0).all(axis=1)] = np.nan # some component must be on inv_v[(output == 0).any(axis=1)] = np.nan # all components must be on # horizontal invariance for each feature inv_h = output.std(axis=2) # inv_h[(output == 0).all(axis=2)] = np.nan # some component must be on inv_h[(output == 0).any(axis=2)] = np.nan # all components must be on # inv = np.sqrt(inv_v**2 + inv_h**2) inv_v = np.nanmean(inv_v, axis=(0, 1)) inv_h = np.nanmean(inv_h, axis=(0, 1)) print("%s: %0.3f (%0.3f), %0.3f (%0.3f)" % ( name, inv_v.mean(), inv_v.std(), inv_h.mean(), inv_h.std()))
apache-2.0
lancezlin/ml_template_py
lib/python2.7/site-packages/sklearn/datasets/samples_generator.py
7
56557
""" Generate samples of synthetic data sets. """ # Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, # G. Louppe, J. Nothman # License: BSD 3 clause import numbers import array import numpy as np from scipy import linalg import scipy.sparse as sp from ..preprocessing import MultiLabelBinarizer from ..utils import check_array, check_random_state from ..utils import shuffle as util_shuffle from ..utils.fixes import astype from ..utils.random import sample_without_replacement from ..externals import six map = six.moves.map zip = six.moves.zip def _generate_hypercube(samples, dimensions, rng): """Returns distinct binary samples of length dimensions """ if dimensions > 30: return np.hstack([_generate_hypercube(samples, dimensions - 30, rng), _generate_hypercube(samples, 30, rng)]) out = astype(sample_without_replacement(2 ** dimensions, samples, random_state=rng), dtype='>u4', copy=False) out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:] return out def make_classification(n_samples=100, n_features=20, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None): """Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Prior to shuffling, `X` stacks a number of these primary "informative" features, "redundant" linear combinations of these, "repeated" duplicates of sampled features, and arbitrary noise for and remaining features. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. These comprise `n_informative` informative features, `n_redundant` redundant features, `n_repeated` duplicated features and `n_features-n_informative-n_redundant- n_repeated` useless features drawn at random. n_informative : int, optional (default=2) The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension `n_informative`. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, optional (default=2) The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, optional (default=0) The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, optional (default=2) The number of classes (or labels) of the classification problem. n_clusters_per_class : int, optional (default=2) The number of clusters per class. weights : list of floats or None (default=None) The proportions of samples assigned to each class. If None, then classes are balanced. Note that if `len(weights) == n_classes - 1`, then the last class weight is automatically inferred. More than `n_samples` samples may be returned if the sum of `weights` exceeds 1. 
flip_y : float, optional (default=0.01) The fraction of samples whose class are randomly exchanged. class_sep : float, optional (default=1.0) The factor multiplying the hypercube dimension. hypercube : boolean, optional (default=True) If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, array of shape [n_features] or None, optional (default=0.0) Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, array of shape [n_features] or None, optional (default=1.0) Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : boolean, optional (default=True) Shuffle the samples and the features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for class membership of each sample. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. See also -------- make_blobs: simplified variant make_multilabel_classification: unrelated generator for multilabel tasks """ generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError("Number of informative, redundant and repeated " "features must sum to less than the number of total" " features") if 2 ** n_informative < n_classes * n_clusters_per_class: raise ValueError("n_classes * n_clusters_per_class must" " be smaller or equal 2 ** n_informative") if weights and len(weights) not in [n_classes, n_classes - 1]: raise ValueError("Weights specified but incompatible with number " "of classes.") n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class if weights and len(weights) == (n_classes - 1): weights.append(1.0 - sum(weights)) if weights is None: weights = [1.0 / n_classes] * n_classes weights[-1] = 1.0 - sum(weights[:-1]) # Distribute samples among clusters by weight n_samples_per_cluster = [] for k in range(n_clusters): n_samples_per_cluster.append(int(n_samples * weights[k % n_classes] / n_clusters_per_class)) for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Initialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=np.int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(float) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.rand(n_clusters, 1) centroids *= generator.rand(1, n_informative) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.randn(n_samples, n_informative) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % 
n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.rand(n_informative, n_informative) - 1 X_k[...] = np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.rand(n_informative, n_redundant) - 1 X[:, n_informative:n_informative + n_redundant] = \ np.dot(X[:, :n_informative], B) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp) X[:, n:n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.randn(n_samples, n_useless) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.rand(n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.rand(n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.rand(n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator='dense', return_distributions=False, random_state=None): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. n_classes : int, optional (default=5) The number of classes of the classification problem. n_labels : int, optional (default=2) The average number of labels per instance. More precisely, the number of labels per sample is drawn from a Poisson distribution with ``n_labels`` as its expected value, but samples are bounded (using rejection sampling) by ``n_classes``, and must be nonzero if ``allow_unlabeled`` is False. length : int, optional (default=50) The sum of the features (number of words if documents) is drawn from a Poisson distribution with this expected value. allow_unlabeled : bool, optional (default=True) If ``True``, some instances might not belong to any class. sparse : bool, optional (default=False) If ``True``, return a sparse feature matrix .. versionadded:: 0.17 parameter to allow *sparse* output. return_indicator : 'dense' (default) | 'sparse' | False If ``dense`` return ``Y`` in the dense binary indicator format. If ``'sparse'`` return ``Y`` in the sparse binary indicator format. ``False`` returns a list of lists of labels. return_distributions : bool, optional (default=False) If ``True``, return the prior class probability and conditional probabilities of features given classes, from which the data was drawn. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. Y : array or sparse CSR matrix of shape [n_samples, n_classes] The label sets. p_c : array, shape [n_classes] The probability of each class being drawn. Only returned if ``return_distributions=True``. p_w_c : array, shape [n_features, n_classes] The probability of each feature being drawn given each class. Only returned if ``return_distributions=True``. """ generator = check_random_state(random_state) p_c = generator.rand(n_classes) p_c /= p_c.sum() cumulative_p_c = np.cumsum(p_c) p_w_c = generator.rand(n_features, n_classes) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling y_size = n_classes + 1 while (not allow_unlabeled and y_size == 0) or y_size > n_classes: y_size = generator.poisson(n_labels) # pick n classes y = set() while len(y) != y_size: # pick a class with probability P(c) c = np.searchsorted(cumulative_p_c, generator.rand(y_size - len(y))) y.update(c) y = list(y) # pick a non-zero document length by rejection sampling n_words = 0 while n_words == 0: n_words = generator.poisson(length) # generate a document of length n_words if len(y) == 0: # if sample does not belong to any class, generate noise word words = generator.randint(n_features, size=n_words) return words, y # sample words with replacement from selected classes cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() cumulative_p_w_sample /= cumulative_p_w_sample[-1] words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words)) return words, y X_indices = array.array('i') X_indptr = array.array('i', [0]) Y = [] for i in range(n_samples): words, y = sample_example() X_indices.extend(words) X_indptr.append(len(X_indices)) Y.append(y) X_data = np.ones(len(X_indices), dtype=np.float64) X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) X.sum_duplicates() if not sparse: X = X.toarray() # return_indicator can be True due to backward compatibility if return_indicator in (True, 'sparse', 'dense'): lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse')) Y = lb.fit([range(n_classes)]).transform(Y) elif return_indicator is not False: raise ValueError("return_indicator must be either 'sparse', 'dense' " 'or False.') if return_distributions: return X, Y, p_c, p_w_c return X, Y def make_hastie_10_2(n_samples=12000, random_state=None): """Generates data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=12000) The number of samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 10] The input samples. 
y : array of shape [n_samples] The output values. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. See also -------- make_gaussian_quantiles: a generalization of this dataset approach """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64) y[y == 0.0] = -1.0 return X, y def make_regression(n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. 
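Examples
--------
Illustrative call (only the output shapes are shown, since the
generated values depend on the random state).

>>> X, y, coef = make_regression(n_samples=5, n_features=4,
...                              n_informative=2, coef=True,
...                              random_state=0)
>>> X.shape, y.shape, coef.shape
((5, 4), (5,), (4,))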
""" n_informative = min(n_features, n_informative) generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.randn(n_samples, n_features) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.rand(n_informative, n_targets) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None, factor=.8): """Make a large circle containing a smaller circle in 2d. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle: bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. factor : double < 1 (default=.8) Scale factor between inner and outer circle. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. """ if factor > 1 or factor < 0: raise ValueError("'factor' has to be between 0 and 1.") generator = check_random_state(random_state) # so as not to have the first point = last point, we add one and then # remove it. linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1] outer_circ_x = np.cos(linspace) outer_circ_y = np.sin(linspace) inner_circ_x = outer_circ_x * factor inner_circ_y = outer_circ_y * factor X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp), np.ones(n_samples // 2, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle : bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out generator = check_random_state(random_state) outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5 X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples_in, dtype=np.intp), np.ones(n_samples_out, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None): """Generate isotropic Gaussian blobs for clustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points equally divided among clusters. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=3) The number of centers to generate, or the fixed center locations. cluster_std : float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box : pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. Examples -------- >>> from sklearn.datasets.samples_generator import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) See also -------- make_classification: a more intricate variant """ generator = check_random_state(random_state) if isinstance(centers, numbers.Integral): centers = generator.uniform(center_box[0], center_box[1], size=(centers, n_features)) else: centers = check_array(centers) n_features = centers.shape[1] if isinstance(cluster_std, numbers.Real): cluster_std = np.ones(len(centers)) * cluster_std X = [] y = [] n_centers = centers.shape[0] n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in range(n_samples % n_centers): n_samples_per_center[i] += 1 for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): X.append(centers[i] + generator.normal(scale=std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: indices = np.arange(n_samples) generator.shuffle(indices) X = X[indices] y = y[indices] return X, y def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None): """Generate the "Friedman \#1" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are independent features uniformly distributed on the interval [0, 1]. 
The output `y` is created according to the formula:: y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). Out of the `n_features` features, only 5 are actually used to compute `y`. The remaining features are independent of `y`. The number of features has to be >= 5. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. Should be at least 5. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ if n_features < 5: raise ValueError("n_features must be at least five.") generator = check_random_state(random_state) X = generator.rand(n_samples, n_features) y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples) return X, y def make_friedman2(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#2" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \ + noise * generator.randn(n_samples) return X, y def make_friedman3(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#3" regression problem This dataset is described in Friedman [1] and Breiman [2]. 
Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ / X[:, 0]) + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \ + noise * generator.randn(n_samples) return X, y def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10, tail_strength=0.5, random_state=None): """Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. effective_rank : int, optional (default=10) The approximate number of singular vectors required to explain most of the data by linear combinations. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The matrix. 
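Examples
--------
Illustrative call (the shape is deterministic, the entries depend on
the random state).

>>> X = make_low_rank_matrix(n_samples=50, n_features=20,
...                          effective_rank=5, random_state=0)
>>> X.shape
(50, 20)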
""" generator = check_random_state(random_state) n = min(n_samples, n_features) # Random (ortho normal) vectors u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic') v, _ = linalg.qr(generator.randn(n_features, n), mode='economic') # Index of the singular values singular_ind = np.arange(n, dtype=np.float64) # Build the singular profile by assembling signal and noise components low_rank = ((1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)) tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) s = np.identity(n) * (low_rank + tail) return np.dot(np.dot(u, s), v.T) def make_sparse_coded_signal(n_samples, n_components, n_features, n_nonzero_coefs, random_state=None): """Generate a signal as a sparse combination of dictionary elements. Returns a matrix Y = DX, such as D is (n_features, n_components), X is (n_components, n_samples) and each column of X has exactly n_nonzero_coefs non-zero elements. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int number of samples to generate n_components: int, number of components in the dictionary n_features : int number of features of the dataset to generate n_nonzero_coefs : int number of active (non-zero) coefficients in each sample random_state : int or RandomState instance, optional (default=None) seed used by the pseudo random number generator Returns ------- data : array of shape [n_features, n_samples] The encoded signal (Y). dictionary : array of shape [n_features, n_components] The dictionary with normalized components (D). code : array of shape [n_components, n_samples] The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). """ generator = check_random_state(random_state) # generate dictionary D = generator.randn(n_features, n_components) D /= np.sqrt(np.sum((D ** 2), axis=0)) # generate code X = np.zeros((n_components, n_samples)) for i in range(n_samples): idx = np.arange(n_components) generator.shuffle(idx) idx = idx[:n_nonzero_coefs] X[idx, i] = generator.randn(n_nonzero_coefs) # encode signal Y = np.dot(D, X) return map(np.squeeze, (Y, D, X)) def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None): """Generate a random regression problem with sparse uncorrelated design This dataset is described in Celeux et al [1]. as:: X ~ N(0, 1) y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] Only the first 4 features are informative. The remaining features are useless. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, "Regularization in regression: comparing Bayesian and frequentist methods in a poorly informative situation", 2009. 
""" generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), scale=np.ones(n_samples)) return X, y def make_spd_matrix(n_dim, random_state=None): """Generate a random symmetric, positive-definite matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_dim : int The matrix dimension. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_dim, n_dim] The random symmetric, positive-definite matrix. See also -------- make_sparse_spd_matrix """ generator = check_random_state(random_state) A = generator.rand(n_dim, n_dim) U, s, V = linalg.svd(np.dot(A.T, A)) X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V) return X def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False, smallest_coef=.1, largest_coef=.9, random_state=None): """Generate a sparse symmetric definite positive matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- dim : integer, optional (default=1) The size of the random matrix to generate. alpha : float between 0 and 1, optional (default=0.95) The probability that a coefficient is zero (see notes). Larger values enforce more sparsity. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. largest_coef : float between 0 and 1, optional (default=0.9) The value of the largest coefficient. smallest_coef : float between 0 and 1, optional (default=0.1) The value of the smallest coefficient. norm_diag : boolean, optional (default=False) Whether to normalize the output matrix to make the leading diagonal elements all 1 Returns ------- prec : sparse matrix of shape (dim, dim) The generated matrix. Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. See also -------- make_spd_matrix """ random_state = check_random_state(random_state) chol = -np.eye(dim) aux = random_state.rand(dim, dim) aux[aux < alpha] = 0 aux[aux > alpha] = (smallest_coef + (largest_coef - smallest_coef) * random_state.rand(np.sum(aux > alpha))) aux = np.tril(aux, k=-1) # Permute the lines: we don't want to have asymmetries in the final # SPD matrix permutation = random_state.permutation(dim) aux = aux[permutation].T[permutation] chol += aux prec = np.dot(chol.T, chol) if norm_diag: # Form the diagonal vector into a row matrix d = np.diag(prec).reshape(1, prec.shape[0]) d = 1. / np.sqrt(d) prec *= d prec *= d.T return prec def make_swiss_roll(n_samples=100, noise=0.0, random_state=None): """Generate a swiss roll dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. Notes ----- The algorithm is from Marsland [1]. References ---------- .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", Chapter 10, 2009. http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py """ generator = check_random_state(random_state) t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples)) x = t * np.cos(t) y = 21 * generator.rand(1, n_samples) z = t * np.sin(t) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_s_curve(n_samples=100, noise=0.0, random_state=None): """Generate an S curve dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. """ generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_gaussian_quantiles(mean=None, cov=1., n_samples=100, n_features=2, n_classes=3, shuffle=True, random_state=None): """Generate isotropic Gaussian and label samples by quantile This classification dataset is constructed by taking a multi-dimensional standard normal distribution and defining classes separated by nested concentric multi-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- mean : array of shape [n_features], optional (default=None) The mean of the multi-dimensional normal distribution. If None then use the origin (0, 0, ...). cov : float, optional (default=1.) The covariance matrix will be this value times the unit matrix. This dataset only produces symmetric normal distributions. n_samples : int, optional (default=100) The total number of points equally divided among classes. n_features : int, optional (default=2) The number of features for each sample. n_classes : int, optional (default=3) The number of classes shuffle : boolean, optional (default=True) Shuffle the samples. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for quantile membership of each sample. Notes ----- The dataset is from Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ if n_samples < n_classes: raise ValueError("n_samples must be at least n_classes") generator = check_random_state(random_state) if mean is None: mean = np.zeros(n_features) else: mean = np.array(mean) # Build multivariate normal distribution X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) # Sort by distance from origin idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) X = X[idx, :] # Label by quantile step = n_samples // n_classes y = np.hstack([np.repeat(np.arange(n_classes), step), np.repeat(n_classes - 1, n_samples - step * n_classes)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) return X, y def _shuffle(data, random_state=None): generator = check_random_state(random_state) n_rows, n_cols = data.shape row_idx = generator.permutation(n_rows) col_idx = generator.permutation(n_cols) result = data[row_idx][:, col_idx] return result, row_idx, col_idx def make_biclusters(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with constant block diagonal structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer The number of biclusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and words using bipartite spectral graph partitioning. In Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 269-274). ACM. 
See also -------- make_checkerboard """ generator = check_random_state(random_state) n_rows, n_cols = shape consts = generator.uniform(minval, maxval, n_clusters) # row and column clusters of approximately equal sizes row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_clusters): selector = np.outer(row_labels == i, col_labels == i) result[selector] += consts[i] if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == c for c in range(n_clusters)) cols = np.vstack(col_labels == c for c in range(n_clusters)) return result, rows, cols def make_checkerboard(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with block checkerboard structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer or iterable (n_row_clusters, n_column_clusters) The number of row and column clusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). Spectral biclustering of microarray data: coclustering genes and conditions. Genome research, 13(4), 703-716. 
See also -------- make_biclusters """ generator = check_random_state(random_state) if hasattr(n_clusters, "__len__"): n_row_clusters, n_col_clusters = n_clusters else: n_row_clusters = n_col_clusters = n_clusters # row and column clusters of approximately equal sizes n_rows, n_cols = shape row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_row_clusters): for j in range(n_col_clusters): selector = np.outer(row_labels == i, col_labels == j) result[selector] += generator.uniform(minval, maxval) if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == label for label in range(n_row_clusters) for _ in range(n_col_clusters)) cols = np.vstack(col_labels == label for _ in range(n_row_clusters) for label in range(n_col_clusters)) return result, rows, cols
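# ----------------------------------------------------------------------
# Minimal usage sketch for the generators defined above (illustrative
# only: the shapes below are deterministic, the values depend on the
# random state; the __main__ guard keeps it from running on import).
if __name__ == "__main__":
    X, y = make_classification(n_samples=200, n_features=10,
                               n_informative=4, n_redundant=2,
                               random_state=0)
    assert X.shape == (200, 10)
    assert np.unique(y).tolist() == [0, 1]

    data, rows, cols = make_checkerboard(shape=(30, 30), n_clusters=(4, 3),
                                         noise=1.0, random_state=0)
    assert data.shape == (30, 30)
    assert rows.shape == (4 * 3, 30) and cols.shape == (4 * 3, 30)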
mit
FrederichCheng/incubator-superset
tests/celery_tests.py
8
11738
"""Unit tests for Superset Celery worker""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import json import os import subprocess import time import unittest from past.builtins import basestring import pandas as pd from superset import app, appbuilder, cli, db, dataframe from superset.models import core as models from superset.models.helpers import QueryStatus from superset.models.sql_lab import Query from superset.security import sync_role_definitions from superset.sql_parse import SupersetQuery from .base_tests import SupersetTestCase BASE_DIR = app.config.get('BASE_DIR') class CeleryConfig(object): BROKER_URL = 'sqla+sqlite:///' + app.config.get('SQL_CELERY_DB_FILE_PATH') CELERY_IMPORTS = ('superset.sql_lab', ) CELERY_RESULT_BACKEND = 'db+sqlite:///' + app.config.get('SQL_CELERY_RESULTS_DB_FILE_PATH') CELERY_ANNOTATIONS = {'sql_lab.add': {'rate_limit': '10/s'}} CONCURRENCY = 1 app.config['CELERY_CONFIG'] = CeleryConfig class UtilityFunctionTests(SupersetTestCase): # TODO(bkyryliuk): support more cases in CTA function. def test_create_table_as(self): q = SupersetQuery("SELECT * FROM outer_space;") self.assertEqual( "CREATE TABLE tmp AS \nSELECT * FROM outer_space", q.as_create_table("tmp")) self.assertEqual( "DROP TABLE IF EXISTS tmp;\n" "CREATE TABLE tmp AS \nSELECT * FROM outer_space", q.as_create_table("tmp", overwrite=True)) # now without a semicolon q = SupersetQuery("SELECT * FROM outer_space") self.assertEqual( "CREATE TABLE tmp AS \nSELECT * FROM outer_space", q.as_create_table("tmp")) # now a multi-line query multi_line_query = ( "SELECT * FROM planets WHERE\n" "Luke_Father = 'Darth Vader'") q = SupersetQuery(multi_line_query) self.assertEqual( "CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n" "Luke_Father = 'Darth Vader'", q.as_create_table("tmp") ) class CeleryTestCase(SupersetTestCase): def __init__(self, *args, **kwargs): super(CeleryTestCase, self).__init__(*args, **kwargs) self.client = app.test_client() def get_query_by_name(self, sql): session = db.session query = session.query(Query).filter_by(sql=sql).first() session.close() return query def get_query_by_id(self, id): session = db.session query = session.query(Query).filter_by(id=id).first() session.close() return query @classmethod def setUpClass(cls): try: os.remove(app.config.get('SQL_CELERY_DB_FILE_PATH')) except OSError as e: app.logger.warn(str(e)) try: os.remove(app.config.get('SQL_CELERY_RESULTS_DB_FILE_PATH')) except OSError as e: app.logger.warn(str(e)) sync_role_definitions() worker_command = BASE_DIR + '/bin/superset worker' subprocess.Popen( worker_command, shell=True, stdout=subprocess.PIPE) admin = appbuilder.sm.find_user('admin') if not admin: appbuilder.sm.add_user( 'admin', 'admin', ' user', '[email protected]', appbuilder.sm.find_role('Admin'), password='general') cli.load_examples(load_test_data=True) @classmethod def tearDownClass(cls): subprocess.call( "ps auxww | grep 'celeryd' | awk '{print $2}' | xargs kill -9", shell=True ) subprocess.call( "ps auxww | grep 'superset worker' | awk '{print $2}' | " "xargs kill -9", shell=True ) def run_sql(self, db_id, sql, client_id, cta='false', tmp_table='tmp', async='false'): self.login() resp = self.client.post( '/superset/sql_json/', data=dict( database_id=db_id, sql=sql, async=async, select_as_cta=cta, tmp_table_name=tmp_table, client_id=client_id, ), ) self.logout() return json.loads(resp.data.decode('utf-8')) def test_add_limit_to_the_query(self): 
session = db.session main_db = self.get_main_database(db.session) eng = main_db.get_sqla_engine() select_query = "SELECT * FROM outer_space;" updated_select_query = main_db.wrap_sql_limit(select_query, 100) # Different DB engines have their own spacing while compiling # the queries, that's why ' '.join(query.split()) is used. # In addition some of the engines do not include OFFSET 0. self.assertTrue( "SELECT * FROM (SELECT * FROM outer_space;) AS inner_qry " "LIMIT 100" in ' '.join(updated_select_query.split()) ) select_query_no_semicolon = "SELECT * FROM outer_space" updated_select_query_no_semicolon = main_db.wrap_sql_limit( select_query_no_semicolon, 100) self.assertTrue( "SELECT * FROM (SELECT * FROM outer_space) AS inner_qry " "LIMIT 100" in ' '.join(updated_select_query_no_semicolon.split()) ) multi_line_query = ( "SELECT * FROM planets WHERE\n Luke_Father = 'Darth Vader';" ) updated_multi_line_query = main_db.wrap_sql_limit(multi_line_query, 100) self.assertTrue( "SELECT * FROM (SELECT * FROM planets WHERE " "Luke_Father = 'Darth Vader';) AS inner_qry LIMIT 100" in ' '.join(updated_multi_line_query.split()) ) def test_run_sync_query_dont_exist(self): main_db = self.get_main_database(db.session) db_id = main_db.id sql_dont_exist = 'SELECT name FROM table_dont_exist' result1 = self.run_sql(db_id, sql_dont_exist, "1", cta='true') self.assertTrue('error' in result1) def test_run_sync_query_cta(self): main_db = self.get_main_database(db.session) db_id = main_db.id eng = main_db.get_sqla_engine() perm_name = 'can_sql_json' sql_where = ( "SELECT name FROM ab_permission WHERE name='{}'".format(perm_name)) result2 = self.run_sql( db_id, sql_where, "2", tmp_table='tmp_table_2', cta='true') self.assertEqual(QueryStatus.SUCCESS, result2['query']['state']) self.assertEqual([], result2['data']) self.assertEqual([], result2['columns']) query2 = self.get_query_by_id(result2['query']['serverId']) # Check the data in the tmp table. 
df2 = pd.read_sql_query(sql=query2.select_sql, con=eng) data2 = df2.to_dict(orient='records') self.assertEqual([{'name': perm_name}], data2) def test_run_sync_query_cta_no_data(self): main_db = self.get_main_database(db.session) db_id = main_db.id sql_empty_result = 'SELECT * FROM ab_user WHERE id=666' result3 = self.run_sql( db_id, sql_empty_result, "3", tmp_table='tmp_table_3', cta='true') self.assertEqual(QueryStatus.SUCCESS, result3['query']['state']) self.assertEqual([], result3['data']) self.assertEqual([], result3['columns']) query3 = self.get_query_by_id(result3['query']['serverId']) self.assertEqual(QueryStatus.SUCCESS, query3.status) def test_run_async_query(self): main_db = self.get_main_database(db.session) eng = main_db.get_sqla_engine() sql_where = "SELECT name FROM ab_role WHERE name='Admin'" result = self.run_sql( main_db.id, sql_where, "4", async='true', tmp_table='tmp_async_1', cta='true') assert result['query']['state'] in ( QueryStatus.PENDING, QueryStatus.RUNNING, QueryStatus.SUCCESS) time.sleep(1) query = self.get_query_by_id(result['query']['serverId']) df = pd.read_sql_query(query.select_sql, con=eng) self.assertEqual(QueryStatus.SUCCESS, query.status) self.assertEqual([{'name': 'Admin'}], df.to_dict(orient='records')) self.assertEqual(QueryStatus.SUCCESS, query.status) self.assertTrue("FROM tmp_async_1" in query.select_sql) self.assertTrue("LIMIT 666" in query.select_sql) self.assertEqual( "CREATE TABLE tmp_async_1 AS \nSELECT name FROM ab_role " "WHERE name='Admin'", query.executed_sql) self.assertEqual(sql_where, query.sql) self.assertEqual(0, query.rows) self.assertEqual(666, query.limit) self.assertEqual(False, query.limit_used) self.assertEqual(True, query.select_as_cta) self.assertEqual(True, query.select_as_cta_used) @staticmethod def de_unicode_dict(d): def str_if_basestring(o): if isinstance(o, basestring): return str(o) return o return {str_if_basestring(k): str_if_basestring(d[k]) for k in d} @classmethod def dictify_list_of_dicts(cls, l, k): return {str(o[k]): cls.de_unicode_dict(o) for o in l} def test_get_columns(self): main_db = self.get_main_database(db.session) df = main_db.get_df("SELECT * FROM multiformat_time_series", None) cdf = dataframe.SupersetDataFrame(df) # Making ordering non-deterministic cols = self.dictify_list_of_dicts(cdf.columns, 'name') if main_db.sqlalchemy_uri.startswith('sqlite'): self.assertEqual(self.dictify_list_of_dicts([ {'is_date': True, 'type': 'STRING', 'name': 'ds', 'is_dim': False}, {'is_date': True, 'type': 'STRING', 'name': 'ds2', 'is_dim': False}, {'agg': 'sum', 'is_date': False, 'type': 'INT', 'name': 'epoch_ms', 'is_dim': False}, {'agg': 'sum', 'is_date': False, 'type': 'INT', 'name': 'epoch_s', 'is_dim': False}, {'is_date': True, 'type': 'STRING', 'name': 'string0', 'is_dim': False}, {'is_date': False, 'type': 'STRING', 'name': 'string1', 'is_dim': True}, {'is_date': True, 'type': 'STRING', 'name': 'string2', 'is_dim': False}, {'is_date': False, 'type': 'STRING', 'name': 'string3', 'is_dim': True}], 'name') , cols ) else: self.assertEqual(self.dictify_list_of_dicts([ {'is_date': True, 'type': 'DATETIME', 'name': 'ds', 'is_dim': False}, {'is_date': True, 'type': 'DATETIME', 'name': 'ds2', 'is_dim': False}, {'agg': 'sum', 'is_date': False, 'type': 'INT', 'name': 'epoch_ms', 'is_dim': False}, {'agg': 'sum', 'is_date': False, 'type': 'INT', 'name': 'epoch_s', 'is_dim': False}, {'is_date': True, 'type': 'STRING', 'name': 'string0', 'is_dim': False}, {'is_date': False, 'type': 'STRING', 'name': 'string1', 'is_dim': 
True}, {'is_date': True, 'type': 'STRING', 'name': 'string2', 'is_dim': False}, {'is_date': False, 'type': 'STRING', 'name': 'string3', 'is_dim': True}], 'name') , cols ) if __name__ == '__main__': unittest.main()
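# Quick illustration of the CTA rewrite exercised in UtilityFunctionTests
# above (a sketch, not part of the test suite; it reuses the SupersetQuery
# import from the top of this module and mirrors the assertion strings).
#
#     q = SupersetQuery("SELECT * FROM outer_space;")
#     q.as_create_table("tmp", overwrite=True)
#     # -> "DROP TABLE IF EXISTS tmp;\n"
#     #    "CREATE TABLE tmp AS \nSELECT * FROM outer_space"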
apache-2.0
heli522/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Ordinary Least Squares and Ridge Regression Variance ========================================================= Due to the few points in each dimension and the straight line that linear regression uses to follow these points as well as it can, noise on the observations will cause great variance as shown in the first plot. Every line's slope can vary quite a bit for each prediction due to the noise induced in the observations. Ridge regression is basically minimizing a penalised version of the least-squared function. The penalising `shrinks` the value of the regression coefficients. Despite the few data points in each dimension, the slope of the prediction is much more stable and the variance in the line itself is greatly reduced, in comparison to that of the standard linear regression """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model X_train = np.c_[.5, 1].T y_train = [.5, 1] X_test = np.c_[0, 2].T np.random.seed(0) classifiers = dict(ols=linear_model.LinearRegression(), ridge=linear_model.Ridge(alpha=.1)) fignum = 1 for name, clf in classifiers.items(): fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.title(name) ax = plt.axes([.12, .12, .8, .8]) for _ in range(6): this_X = .1 * np.random.normal(size=(2, 1)) + X_train clf.fit(this_X, y_train) ax.plot(X_test, clf.predict(X_test), color='.5') ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10) clf.fit(X_train, y_train) ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue') ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10) ax.set_xticks(()) ax.set_yticks(()) ax.set_ylim((0, 1.6)) ax.set_xlabel('X') ax.set_ylabel('y') ax.set_xlim(0, 2) fignum += 1 plt.show()
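# Optional numeric companion to the plots (a sketch, not part of the
# original example): quantify the variance claim by measuring the spread
# of the fitted slope over repeated noisy resamples of the training data.
slopes = {name: [] for name in classifiers}
for name, clf in classifiers.items():
    for _ in range(100):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)
        slopes[name].append(clf.coef_[0])
for name in sorted(classifiers):
    print("%s: slope std over resamples = %.3f" % (name, np.std(slopes[name])))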
bsd-3-clause
CompPhysics/ComputationalPhysics
doc/Programs/LecturePrograms/programs/RandomWalks/python/OneDimParticle.py
3
1539
# Program to test the Metropolis algorithm with one particle at given temp in # one dimension #!/usr/bin/env python import numpy as np import matplotlib.mlab as mlab import matplotlib.pyplot as plt import random from math import sqrt, exp, log # initialize the rng with a seed random.seed() # Hard coding of input parameters MCcycles = 100000 Temperature = 2.0 beta = 1./Temperature InitialVelocity = -2.0 CurrentVelocity = InitialVelocity Energy = 0.5*InitialVelocity*InitialVelocity VelocityRange = 10*sqrt(Temperature) VelocityStep = 2*VelocityRange/10. AverageEnergy = Energy AverageEnergy2 = Energy*Energy VelocityValues = np.zeros(MCcycles) # The Monte Carlo sampling with Metropolis starts here for i in range (1, MCcycles, 1): TrialVelocity = CurrentVelocity + (2.0*random.random() - 1.0)*VelocityStep EnergyChange = 0.5*(TrialVelocity*TrialVelocity -CurrentVelocity*CurrentVelocity); if random.random() <= exp(-beta*EnergyChange): CurrentVelocity = TrialVelocity Energy += EnergyChange VelocityValues[i] = CurrentVelocity AverageEnergy += Energy AverageEnergy2 += Energy*Energy #Final averages AverageEnergy = AverageEnergy/MCcycles AverageEnergy2 = AverageEnergy2/MCcycles Variance = AverageEnergy2 - AverageEnergy*AverageEnergy print(AverageEnergy, Variance) n, bins, patches = plt.hist(VelocityValues, 400, facecolor='green') plt.xlabel('$v$') plt.ylabel('Velocity distribution P(v)') plt.title(r'Velocity histogram at $k_BT=2$') plt.axis([-5, 5, 0, 600]) plt.grid(True) plt.show()
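# Optional check (a sketch, not in the original script): compare the
# sampled velocities against the analytic Boltzmann distribution
# P(v) = exp(-v^2 / (2 k_B T)) / sqrt(2 pi k_B T), with m = k_B = 1.
counts, edges = np.histogram(VelocityValues, bins=100, density=True)
centers = 0.5 * (edges[:-1] + edges[1:])
v = np.linspace(-5, 5, 200)
analytic = np.exp(-0.5 * v * v / Temperature) / sqrt(2 * np.pi * Temperature)
plt.figure()
plt.plot(centers, counts, 'g.', label='Metropolis samples')
plt.plot(v, analytic, 'r-', label='analytic $P(v)$')
plt.xlabel('$v$')
plt.ylabel('P(v)')
plt.legend()
plt.show()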
cc0-1.0
DC23/plumbing
plumbing/csv_tables.py
1
2259
# Built-in modules # import os, shutil, csv from itertools import izip # Internal modules # from autopaths import FilePath from tmpstuff import TmpFile # Third party modules # import sh, pandas ################################################################################ class CSVTable(FilePath): d = ',' def __init__(self, path, d=None): if isinstance(path, FilePath): path = path.path self.path = path if d is not None: self.d = d def remove_first_line(self): sh.sed('-i', '1d', self.path) def remove_last_line(self): sh.sed('-i', '1d', self.path) def replace_title(self, before, after): sh.sed('-i', '1s/%s/%s/' % (before, after), self.path) def rewrite_lines(self, lines, path=None): if path is None: with TmpFile() as tmpfile: tmpfile.handle.writelines(lines) os.remove(self.path) shutil.move(tmpfile.path, self.path) else: with open(path, 'w') as handle: handle.writelines(lines) def integer_lines(self): handle = open(self.path) yield handle.next() for line in handle: line = line.split() yield line[0] + self.d + self.d.join(map(str, map(int, map(float, line[1:])))) + '\n' def to_integer(self, path=None): self.rewrite_lines(self.integer_lines(), path) def min_sum_lines(self, minimum): handle = open(self.path) yield handle.next() for line in handle: if sum(map(int, line.split()[1:])) >= minimum: yield line def filter_line_sum(self, minimum, path=None): self.rewrite_lines(self.min_sum_lines(minimum), path) def transposed_lines(self, d): rows = izip(*csv.reader(open(self.path), delimiter=self.d)) for row in rows: yield d.join(row) + '\n' def transpose(self, path=None, d=None): self.rewrite_lines(self.transposed_lines(self.d if d is None else d), path) def to_dataframe(self, **kwargs): """Load up the CSV file as a pandas dataframe""" return pandas.io.parsers.read_csv(self.path, sep=self.d, **kwargs) ################################################################################ class TSVTable(CSVTable): d = '\t'
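# Minimal usage sketch (illustrative only; 'counts.tsv' and the output
# path are hypothetical, and the calls simply chain the methods defined
# above; the __main__ guard keeps it from running on import).
if __name__ == '__main__':
    table = TSVTable('counts.tsv')
    table.filter_line_sum(10)              # drop rows whose counts sum < 10
    table.transpose(path='counts_t.tsv')   # write a transposed copy
    df = table.to_dataframe(index_col=0)   # load the filtered table
    print(df.shape)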
mit
mattilyra/scikit-learn
sklearn/utils/tests/test_validation.py
56
18600
"""Tests for input validation functions""" import warnings from tempfile import NamedTemporaryFile from itertools import product import numpy as np from numpy.testing import assert_array_equal import scipy.sparse as sp from nose.tools import assert_raises, assert_true, assert_false, assert_equal from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils import as_float_array, check_array, check_symmetric from sklearn.utils import check_X_y from sklearn.utils.mocking import MockDataFrame from sklearn.utils.estimator_checks import NotAnArray from sklearn.random_projection import sparse_random_matrix from sklearn.linear_model import ARDRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.svm import SVR from sklearn.datasets import make_blobs from sklearn.utils.validation import ( has_fit_parameter, check_is_fitted, check_consistent_length, ) from sklearn.exceptions import NotFittedError from sklearn.exceptions import DataConversionWarning from sklearn.utils.testing import assert_raise_message def test_as_float_array(): # Test function for as_float_array X = np.ones((3, 10), dtype=np.int32) X = X + np.arange(10, dtype=np.int32) # Checks that the return type is ok X2 = as_float_array(X, copy=False) np.testing.assert_equal(X2.dtype, np.float32) # Another test X = X.astype(np.int64) X2 = as_float_array(X, copy=True) # Checking that the array wasn't overwritten assert_true(as_float_array(X, False) is not X) # Checking that the new type is ok np.testing.assert_equal(X2.dtype, np.float64) # Here, X is of the right type, it shouldn't be modified X = np.ones((3, 2), dtype=np.float32) assert_true(as_float_array(X, copy=False) is X) # Test that if X is fortran ordered it stays X = np.asfortranarray(X) assert_true(np.isfortran(as_float_array(X, copy=True))) # Test the copy parameter with some matrices matrices = [ np.matrix(np.arange(5)), sp.csc_matrix(np.arange(5)).toarray(), sparse_random_matrix(10, 10, density=0.10).toarray() ] for M in matrices: N = as_float_array(M, copy=True) N[0, 0] = np.nan assert_false(np.isnan(M).any()) def test_np_matrix(): # Confirm that input validation code does not return np.matrix X = np.arange(12).reshape(3, 4) assert_false(isinstance(as_float_array(X), np.matrix)) assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix)) assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix)) def test_memmap(): # Confirm that input validation code doesn't copy memory mapped arrays asflt = lambda x: as_float_array(x, copy=False) with NamedTemporaryFile(prefix='sklearn-test') as tmp: M = np.memmap(tmp, shape=(10, 10), dtype=np.float32) M[:] = 0 for f in (check_array, np.asarray, asflt): X = f(M) X[:] = 1 assert_array_equal(X.ravel(), M.ravel()) X[:] = 0 def test_ordering(): # Check that ordering is enforced correctly by validation utilities. # We need to check each validation utility, because a 'copy' without # 'order=K' will kill the ordering. 
X = np.ones((10, 5)) for A in X, X.T: for copy in (True, False): B = check_array(A, order='C', copy=copy) assert_true(B.flags['C_CONTIGUOUS']) B = check_array(A, order='F', copy=copy) assert_true(B.flags['F_CONTIGUOUS']) if copy: assert_false(A is B) X = sp.csr_matrix(X) X.data = X.data[::-1] assert_false(X.data.flags['C_CONTIGUOUS']) @ignore_warnings def test_check_array(): # accept_sparse == None # raise error on sparse inputs X = [[1, 2], [3, 4]] X_csr = sp.csr_matrix(X) assert_raises(TypeError, check_array, X_csr) # ensure_2d assert_warns(DeprecationWarning, check_array, [0, 1, 2]) X_array = check_array([0, 1, 2]) assert_equal(X_array.ndim, 2) X_array = check_array([0, 1, 2], ensure_2d=False) assert_equal(X_array.ndim, 1) # don't allow ndim > 3 X_ndim = np.arange(8).reshape(2, 2, 2) assert_raises(ValueError, check_array, X_ndim) check_array(X_ndim, allow_nd=True) # doesn't raise # force_all_finite X_inf = np.arange(4).reshape(2, 2).astype(np.float) X_inf[0, 0] = np.inf assert_raises(ValueError, check_array, X_inf) check_array(X_inf, force_all_finite=False) # no raise # nan check X_nan = np.arange(4).reshape(2, 2).astype(np.float) X_nan[0, 0] = np.nan assert_raises(ValueError, check_array, X_nan) check_array(X_inf, force_all_finite=False) # no raise # dtype and order enforcement. X_C = np.arange(4).reshape(2, 2).copy("C") X_F = X_C.copy("F") X_int = X_C.astype(np.int) X_float = X_C.astype(np.float) Xs = [X_C, X_F, X_int, X_float] dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object] orders = ['C', 'F', None] copys = [True, False] for X, dtype, order, copy in product(Xs, dtypes, orders, copys): X_checked = check_array(X, dtype=dtype, order=order, copy=copy) if dtype is not None: assert_equal(X_checked.dtype, dtype) else: assert_equal(X_checked.dtype, X.dtype) if order == 'C': assert_true(X_checked.flags['C_CONTIGUOUS']) assert_false(X_checked.flags['F_CONTIGUOUS']) elif order == 'F': assert_true(X_checked.flags['F_CONTIGUOUS']) assert_false(X_checked.flags['C_CONTIGUOUS']) if copy: assert_false(X is X_checked) else: # doesn't copy if it was already good if (X.dtype == X_checked.dtype and X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS'] and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']): assert_true(X is X_checked) # allowed sparse != None X_csc = sp.csc_matrix(X_C) X_coo = X_csc.tocoo() X_dok = X_csc.todok() X_int = X_csc.astype(np.int) X_float = X_csc.astype(np.float) Xs = [X_csc, X_coo, X_dok, X_int, X_float] accept_sparses = [['csr', 'coo'], ['coo', 'dok']] for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses, copys): with warnings.catch_warnings(record=True) as w: X_checked = check_array(X, dtype=dtype, accept_sparse=accept_sparse, copy=copy) if (dtype is object or sp.isspmatrix_dok(X)) and len(w): message = str(w[0].message) messages = ["object dtype is not supported by sparse matrices", "Can't check dok sparse matrix for nan or inf."] assert_true(message in messages) else: assert_equal(len(w), 0) if dtype is not None: assert_equal(X_checked.dtype, dtype) else: assert_equal(X_checked.dtype, X.dtype) if X.format in accept_sparse: # no change if allowed assert_equal(X.format, X_checked.format) else: # got converted assert_equal(X_checked.format, accept_sparse[0]) if copy: assert_false(X is X_checked) else: # doesn't copy if it was already good if (X.dtype == X_checked.dtype and X.format == X_checked.format): assert_true(X is X_checked) # other input formats # convert lists to arrays X_dense = check_array([[1, 2], [3, 4]]) 
assert_true(isinstance(X_dense, np.ndarray)) # raise on too deep lists assert_raises(ValueError, check_array, X_ndim.tolist()) check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise # convert weird stuff to arrays X_no_array = NotAnArray(X_dense) result = check_array(X_no_array) assert_true(isinstance(result, np.ndarray)) def test_check_array_pandas_dtype_object_conversion(): # test that data-frame like objects with dtype object # get converted X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object) X_df = MockDataFrame(X) assert_equal(check_array(X_df).dtype.kind, "f") assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f") # smoke-test against dataframes with column named "dtype" X_df.dtype = "Hans" assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f") def test_check_array_dtype_stability(): # test that lists with ints don't get converted to floats X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] assert_equal(check_array(X).dtype.kind, "i") assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i") def test_check_array_dtype_warning(): X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] X_float64 = np.asarray(X_int_list, dtype=np.float64) X_float32 = np.asarray(X_int_list, dtype=np.float32) X_int64 = np.asarray(X_int_list, dtype=np.int64) X_csr_float64 = sp.csr_matrix(X_float64) X_csr_float32 = sp.csr_matrix(X_float32) X_csc_float32 = sp.csc_matrix(X_float32) X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32) y = [0, 0, 1] integer_data = [X_int64, X_csc_int32] float64_data = [X_float64, X_csr_float64] float32_data = [X_float32, X_csr_float32, X_csc_float32] for X in integer_data: X_checked = assert_no_warnings(check_array, X, dtype=np.float64, accept_sparse=True) assert_equal(X_checked.dtype, np.float64) X_checked = assert_warns(DataConversionWarning, check_array, X, dtype=np.float64, accept_sparse=True, warn_on_dtype=True) assert_equal(X_checked.dtype, np.float64) # Check that the warning message includes the name of the Estimator X_checked = assert_warns_message(DataConversionWarning, 'SomeEstimator', check_array, X, dtype=[np.float64, np.float32], accept_sparse=True, warn_on_dtype=True, estimator='SomeEstimator') assert_equal(X_checked.dtype, np.float64) X_checked, y_checked = assert_warns_message( DataConversionWarning, 'KNeighborsClassifier', check_X_y, X, y, dtype=np.float64, accept_sparse=True, warn_on_dtype=True, estimator=KNeighborsClassifier()) assert_equal(X_checked.dtype, np.float64) for X in float64_data: X_checked = assert_no_warnings(check_array, X, dtype=np.float64, accept_sparse=True, warn_on_dtype=True) assert_equal(X_checked.dtype, np.float64) X_checked = assert_no_warnings(check_array, X, dtype=np.float64, accept_sparse=True, warn_on_dtype=False) assert_equal(X_checked.dtype, np.float64) for X in float32_data: X_checked = assert_no_warnings(check_array, X, dtype=[np.float64, np.float32], accept_sparse=True) assert_equal(X_checked.dtype, np.float32) assert_true(X_checked is X) X_checked = assert_no_warnings(check_array, X, dtype=[np.float64, np.float32], accept_sparse=['csr', 'dok'], copy=True) assert_equal(X_checked.dtype, np.float32) assert_false(X_checked is X) X_checked = assert_no_warnings(check_array, X_csc_float32, dtype=[np.float64, np.float32], accept_sparse=['csr', 'dok'], copy=False) assert_equal(X_checked.dtype, np.float32) assert_false(X_checked is X_csc_float32) assert_equal(X_checked.format, 'csr') def test_check_array_min_samples_and_features_messages(): # empty list is considered 2D by default: msg = "0 feature(s) (shape=(1, 0)) 
while a minimum of 1 is required." assert_raise_message(ValueError, msg, check_array, [[]]) # If considered a 1D collection when ensure_2d=False, then the minimum # number of samples will break: msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required." assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False) # Invalid edge case when checking the default minimum sample of a scalar msg = "Singleton array array(42) cannot be considered a valid collection." assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False) # But this works if the input data is forced to look like a 2 array with # one sample and one feature: X_checked = assert_warns(DeprecationWarning, check_array, [42], ensure_2d=True) assert_array_equal(np.array([[42]]), X_checked) # Simulate a model that would need at least 2 samples to be well defined X = np.ones((1, 10)) y = np.ones(1) msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required." assert_raise_message(ValueError, msg, check_X_y, X, y, ensure_min_samples=2) # The same message is raised if the data has 2 dimensions even if this is # not mandatory assert_raise_message(ValueError, msg, check_X_y, X, y, ensure_min_samples=2, ensure_2d=False) # Simulate a model that would require at least 3 features (e.g. SelectKBest # with k=3) X = np.ones((10, 2)) y = np.ones(2) msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required." assert_raise_message(ValueError, msg, check_X_y, X, y, ensure_min_features=3) # Only the feature check is enabled whenever the number of dimensions is 2 # even if allow_nd is enabled: assert_raise_message(ValueError, msg, check_X_y, X, y, ensure_min_features=3, allow_nd=True) # Simulate a case where a pipeline stage as trimmed all the features of a # 2D dataset. X = np.empty(0).reshape(10, 0) y = np.ones(10) msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required." 
assert_raise_message(ValueError, msg, check_X_y, X, y) # nd-data is not checked for any minimum number of features by default: X = np.ones((10, 0, 28, 28)) y = np.ones(10) X_checked, y_checked = check_X_y(X, y, allow_nd=True) assert_array_equal(X, X_checked) assert_array_equal(y, y_checked) def test_has_fit_parameter(): assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight")) assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight")) assert_true(has_fit_parameter(SVR, "sample_weight")) assert_true(has_fit_parameter(SVR(), "sample_weight")) def test_check_symmetric(): arr_sym = np.array([[0, 1], [1, 2]]) arr_bad = np.ones(2) arr_asym = np.array([[0, 2], [0, 2]]) test_arrays = {'dense': arr_asym, 'dok': sp.dok_matrix(arr_asym), 'csr': sp.csr_matrix(arr_asym), 'csc': sp.csc_matrix(arr_asym), 'coo': sp.coo_matrix(arr_asym), 'lil': sp.lil_matrix(arr_asym), 'bsr': sp.bsr_matrix(arr_asym)} # check error for bad inputs assert_raises(ValueError, check_symmetric, arr_bad) # check that asymmetric arrays are properly symmetrized for arr_format, arr in test_arrays.items(): # Check for warnings and errors assert_warns(UserWarning, check_symmetric, arr) assert_raises(ValueError, check_symmetric, arr, raise_exception=True) output = check_symmetric(arr, raise_warning=False) if sp.issparse(output): assert_equal(output.format, arr_format) assert_array_equal(output.toarray(), arr_sym) else: assert_array_equal(output, arr_sym) def test_check_is_fitted(): # Check is ValueError raised when non estimator instance passed assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_") assert_raises(TypeError, check_is_fitted, "SVR", "support_") ard = ARDRegression() svr = SVR() try: assert_raises(NotFittedError, check_is_fitted, ard, "coef_") assert_raises(NotFittedError, check_is_fitted, svr, "support_") except ValueError: assert False, "check_is_fitted failed with ValueError" # NotFittedError is a subclass of both ValueError and AttributeError try: check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s") except ValueError as e: assert_equal(str(e), "Random message ARDRegression, ARDRegression") try: check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s") except AttributeError as e: assert_equal(str(e), "Another message SVR, SVR") ard.fit(*make_blobs()) svr.fit(*make_blobs()) assert_equal(None, check_is_fitted(ard, "coef_")) assert_equal(None, check_is_fitted(svr, "support_")) def test_check_consistent_length(): check_consistent_length([1], [2], [3], [4], [5]) check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b']) check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2))) assert_raises_regexp(ValueError, 'inconsistent numbers of samples', check_consistent_length, [1, 2], [1]) assert_raises_regexp(TypeError, 'got <\w+ \'int\'>', check_consistent_length, [1, 2], 1) assert_raises_regexp(TypeError, 'got <\w+ \'object\'>', check_consistent_length, [1, 2], object()) assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1)) # Despite ensembles having __len__ they must raise TypeError assert_raises_regexp(TypeError, 'estimator', check_consistent_length, [1, 2], RandomForestRegressor()) # XXX: We should have a test with a string, but what is correct behaviour?
bsd-3-clause
homeslike/OpticalTweezer
scripts/spring15_p0.3_at0.1/vCOMhistogramMass.py
27
3006
import math import sys import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab from subprocess import call from scipy.stats import norm # proc = call("ls *.dat",shell=True) # datetime = "170123_2033_" datetime = sys.argv[1]+"_" gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100) gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100) vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100) vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100) vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100) internalTempData = np.genfromtxt(datetime+"temperature_internal.dat",skip_header=200) N = 32 internalTemp = np.mean(internalTempData) vSqd = [] for i in range(0,len(vCOMData_x)): vSqd.append(32*(vCOMData_x[i]*vCOMData_x[i]+vCOMData_y[i]*vCOMData_y[i]+vCOMData_z[i]*vCOMData_z[i])*0.5) vSqdMean = np.mean(vSqd) # histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=False) # histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=False) # histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=False) histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True) histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True) histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True) inTemp = np.mean(gasTempDataIn) outTemp = np.mean(gasTempDataOut) statistics = open(datetime+"statistics_mass.dat","w") statistics.write("GasIn: " + str(inTemp)+" +- " + str(np.std(gasTempDataIn)) + "\n") statistics.write("GasOut: " + str(outTemp)+" +- " +str(np.std(gasTempDataOut)) + "\n") statistics.write("T_COM: " + str(2./3. * vSqdMean)+" +- " +str(np.std(vSqd)) + "\n") statistics.write("T_INT: " + str(internalTemp)+" +- " +str(np.std(internalTempData)) + "\n") statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n") statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n") statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n") statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n") statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n") statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n") histogram_x_file = open(datetime+"histogram_mass_vx.dat","w") histogram_y_file = open(datetime+"histogram_mass_vy.dat","w") histogram_z_file = open(datetime+"histogram_mass_vz.dat","w") for i in range(0,len(histogram_x)): histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n") histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n") histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n") # plt.figure(1) # plt.hist(vCOMData_x,bins=100) # plt.figure(2) # plt.hist(vCOMData_y,bins=100) # plt.figure(3) # plt.hist(vCOMData_z,bins=100) # plt.show() # plt.figure(1) # plt.plot(vSqd) # plt.plot((0,700),(vSqdMean,vSqdMean)) # plt.figure(2) # plt.hist(vCOMData_x,bins=100,normed=True) # plt.plot(x,gasInPDF) # plt.show()
mit
JazzeYoung/VeryDeepAutoEncoder
pylearn2/optimization/test_batch_gradient_descent.py
44
6402
from __future__ import print_function from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent import theano.tensor as T from pylearn2.utils import sharedX import numpy as np from theano.compat.six.moves import xrange from theano import config from theano.printing import min_informative_str def test_batch_gradient_descent(): """ Verify that batch gradient descent works by checking that it minimizes a quadratic function f(x) = x^T A x + b^T x + c correctly for several sampled values of A, b, and c. The ground truth minimizer is x = np.linalg.solve(A,-b)""" n = 3 A = T.matrix(name = 'A') b = T.vector(name = 'b') c = T.scalar(name = 'c') x = sharedX( np.zeros((n,)) , name = 'x') half = np.cast[config.floatX](0.5) obj = half * T.dot(T.dot(x,A),x)+T.dot(b,x)+c minimizer = BatchGradientDescent( objective = obj, params = [ x], inputs = [ A, b, c]) num_samples = 3 rng = np.random.RandomState([1,2,3]) for i in xrange(num_samples): A = np.cast[config.floatX](rng.randn(1.5*n,n)) A = np.cast[config.floatX](np.dot(A.T,A)) A += np.cast[config.floatX](np.identity(n) * .02) b = np.cast[config.floatX](rng.randn(n)) c = np.cast[config.floatX](rng.randn()) x.set_value(np.cast[config.floatX](rng.randn(n))) analytical_x = np.linalg.solve(A,-b) actual_obj = minimizer.minimize(A,b,c) actual_x = x.get_value() #Check that the value returned by the minimize method #is the objective function value at the parameters #chosen by the minimize method cur_obj = minimizer.obj(A,b,c) assert np.allclose(actual_obj, cur_obj) x.set_value(analytical_x) analytical_obj = minimizer.obj(A,b,c) #make sure the objective function is accurate to first 4 digits condition1 = not np.allclose(analytical_obj, actual_obj) condition2 = np.abs(analytical_obj-actual_obj) >= 1e-4 * \ np.abs(analytical_obj) if (config.floatX == 'float64' and condition1) \ or (config.floatX == 'float32' and condition2): print('objective function value came out wrong on sample ',i) print('analytical obj', analytical_obj) print('actual obj',actual_obj) """ The following section of code was used to verify that numerical error can make the objective function look non-convex print('Checking for numerically induced non-convex behavior') def f(x): return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c x.set_value(actual_x) minimizer._compute_grad(A,b,c) minimizer._normalize_grad() d = minimizer.param_to_grad_shared[x].get_value() x = actual_x.copy() prev = f(x) print(prev) step_size = 1e-4 x += step_size * d cur = f(x) print(cur) cur_sgn = np.sign(cur-prev) flip_cnt = 0 for i in xrange(10000): x += step_size * d prev = cur cur = f(x) print(cur) prev_sgn = cur_sgn cur_sgn = np.sign(cur-prev) if cur_sgn != prev_sgn: print('flip') flip_cnt += 1 if flip_cnt > 1: print("Non-convex!") from matplotlib import pyplot as plt y = [] x = actual_x.copy() for j in xrange(10000): y.append(f(x)) x += step_size * d plt.plot(y) plt.show() assert False print('None found') """ #print 'actual x',actual_x #print 'A:' #print A #print 'b:' #print b #print 'c:' #print c x.set_value(actual_x) minimizer._compute_grad(A,b,c) x_grad = minimizer.param_to_grad_shared[x] actual_grad = x_grad.get_value() correct_grad = 0.5 * np.dot(A,x.get_value())+ 0.5 * \ np.dot(A.T, x.get_value()) +b if not np.allclose(actual_grad, correct_grad): print('gradient was wrong at convergence point') print('actual grad: ') print(actual_grad) print('correct grad: ') print(correct_grad) print('max difference: ', end='') np.abs(actual_grad-correct_grad).max() assert False minimizer._normalize_grad() d = 
minimizer.param_to_grad_shared[x].get_value() step_len = ( np.dot(b,d) + 0.5 * np.dot(d,np.dot(A,actual_x)) \ + 0.5 * np.dot(actual_x,np.dot(A,d)) ) \ / np.dot(d, np.dot(A,d)) g = np.dot(A,actual_x)+b deriv = np.dot(g,d) print('directional deriv at actual', deriv) print('optimal step_len', step_len) optimal_x = actual_x - d * step_len g = np.dot(A,optimal_x) + b deriv = np.dot(g,d) print('directional deriv at optimal: ',deriv) x.set_value(optimal_x) print('obj at optimal: ',minimizer.obj(A,b,c)) print('eigenvalue range:') val, vec = np.linalg.eig(A) print((val.min(),val.max())) print('condition number: ',(val.max()/val.min())) assert False if __name__ == '__main__': test_batch_gradient_descent()
bsd-3-clause
mayblue9/bokeh
bokeh/compat/mplexporter/renderers/vincent_renderer.py
64
1922
import warnings from .base import Renderer from ..exporter import Exporter class VincentRenderer(Renderer): def open_figure(self, fig, props): self.chart = None self.figwidth = int(props['figwidth'] * props['dpi']) self.figheight = int(props['figheight'] * props['dpi']) def draw_line(self, data, coordinates, style, label, mplobj=None): import vincent # only import if VincentRenderer is used if coordinates != 'data': warnings.warn("Only data coordinates supported. Skipping this") linedata = {'x': data[:, 0], 'y': data[:, 1]} line = vincent.Line(linedata, iter_idx='x', width=self.figwidth, height=self.figheight) # TODO: respect the other style settings line.scales['color'].range = [style['color']] if self.chart is None: self.chart = line else: warnings.warn("Multiple plot elements not yet supported") def draw_markers(self, data, coordinates, style, label, mplobj=None): import vincent # only import if VincentRenderer is used if coordinates != 'data': warnings.warn("Only data coordinates supported. Skipping this") markerdata = {'x': data[:, 0], 'y': data[:, 1]} markers = vincent.Scatter(markerdata, iter_idx='x', width=self.figwidth, height=self.figheight) # TODO: respect the other style settings markers.scales['color'].range = [style['facecolor']] if self.chart is None: self.chart = markers else: warnings.warn("Multiple plot elements not yet supported") def fig_to_vincent(fig): """Convert a matplotlib figure to a vincent object""" renderer = VincentRenderer() exporter = Exporter(renderer) exporter.run(fig) return renderer.chart
bsd-3-clause
nkoukou/University_Projects_Year_3
Growing_Networks/analysis.py
1
16990
''' This module contains analysis for a growing network. ''' import numpy as np import scipy.optimize as sco import scipy.stats as scs import matplotlib as mat import matplotlib.pylab as plt import matplotlib.cm as cm from graph import Graph import logbin as lb font = {'size' : 20} mat.rc('font', **font) class Analysis(object): def __init__(self, N, m, iters=100, method=0, mode='temp'): ''' Instatiates an object which contains methods for analysing growing networks. Parameter iters indicates how many networks will be used to improve statistics. Parameter method indicates how the network grows: - 0: preferential attachment to vertex proportional to its degree - 1xx: random attachment to vertex, after random walk of length xx Parameter mode indicates how the data are loaded: - temp: runs the networks just-in-time and directly loads the data - save: runs the networks just-in-time and saves the data as well - load: loads already saved data ''' self.N = int(N) self.m = int(m) self.iters = iters if method in np.append(np.arange(100, 110), 0): self.method = method else: raise ValueError('Not implemented method') if mode in ['temp', 'save', 'load']: self.mode = mode else: raise ValueError('Not implemented mode') self.k = None self.p = None self.degrees = None self.kmax = None self.xb = None self.degb = None self.degb_err = None self.pb = None def setmethod(self, method): ''' Setter for the method of attaching edges from a new vertex of the graph. ''' if method in np.append(np.arange(100, 200), 0): self.method = method else: raise ValueError('Not implemented method') def setiters(self, iters): ''' Setter for number of iterations to create new graphs. ''' self.iters = int(iters) def setm(self, m): ''' Setter for number of attachments m of new vertex. ''' self.m = int(m) self.degrees = None self.kmax = None def setN(self, N): ''' Setter for number N of final vertices in the network. ''' self.N = int(N) self.degrees = None self.kmax = None def setkp(self): ''' Setter for the range of degree k and the theoretical distribution across that range. ''' if self.degrees is None: self.run_graph() self.k = np.arange(self.m, self.degrees.shape[1]).astype(np.float) if self.method==0: self.p = 2.*self.m*(self.m+1)/(self.k*(self.k+1)*(self.k+2)) elif self.method==100: self.p = 1./(1+self.m)*np.power(self.m/(1.+self.m), self.k-self.m) def name_file(self, i): ''' Creates the file name that is going to be loaded or saved based on given iteration number i. ''' tail = '_N{0}_m{1}_i{2}_{3}'.format(int(np.log10(self.N)), str(self.m).zfill(2), str(i).zfill(3), str(self.method).zfill(3)) if self.mode=='load': tail +='.npy' dstr = 'runs/deg'+tail return dstr def save_all(self): ''' Saves all relevant data for analysis. ''' if self.mode!='save': raise ValueError('Object not in save mode') self.setm(3) self.setiters(1000) for method in [0, 100]: self.setmethod(method) for N in [1e2, 1e3, 1e4, 1e5, 1e6, 1e7]: self.setN(N) if self.N==int(1e6): for m in [1, 3, 5, 10]: self.setm(m) self.run_graph() self.setm(3) else: self.run_graph() def run_graph(self): ''' Loads data from networks. 
''' if self.mode=='save': g = Graph(self.N, self.m, self.method) for i in xrange(self.iters): g.initialize() g.grow() deg = g.deg() dstr = self.name_file(i) np.save(dstr, deg) else: self.kmax = np.zeros(self.iters, dtype=np.int64) self.degrees = np.zeros((self.iters,1), dtype=np.int64) if self.mode=='temp': g = Graph(self.N, self.m, self.method) for i in xrange(self.iters): if self.mode=='temp': g.initialize() g.grow() deg = g.deg() elif self.mode=='load': dstr = self.name_file(i) deg = np.load(dstr) self.kmax[i] = deg.size - 1 if deg.size > self.degrees.shape[1]: padding = deg.size - self.degrees.shape[1] self.degrees = np.pad(self.degrees, ((0,0),(0,padding)), 'constant', constant_values=0) else: padding = self.degrees.shape[1] - deg.size deg = np.pad(deg, (0, padding), 'constant', constant_values=0) self.degrees[i] = deg self.setkp() def calc_stats(self, stat='chisq', notail=850): ''' Analyses statistically how close the observed distribution is to the theoretical one. Parameter stat determines the statistical test used: - chisq ~ Pearson chi squared (plots for all tails if notail=-1) - redchisq ~ Reduced chi squared - ks ~ Kolmogorov Smirnov one-sample test - ks2 ~ Kolmogorov Smirnov two-sample test ''' if self.degrees is None: self.run_graph() y = self.degrees[:,self.m:].sum(0) if stat=='chisq': p = self.N * self.iters * self.p if notail==-1: fig = plt.figure() ax = fig.add_subplot(111) chis = [] for notail in xrange(1,p.size): yv, pv = y[:notail], p[:notail] chi2 = scs.chisquare(yv, pv, ddof=0) chis.append(chi2[1]) ax.plot(np.arange(1, p.size)/1000., chis, 'g.', markersize=1) ax.set_xlabel('Degree $k/10^3$') ax.set_ylabel('$p$-value') ax.axhline(y=0.1, c='r', ls='--') else: y, p = y[:notail], p[:notail] chi2 = scs.chisquare(y, p, ddof=0) return chi2 # np.sum((np.absolute(y-p)-0.5)**2/p) # - Yule's correction elif stat=='redchisq': p = self.N * self.iters * self.p sy = self.N * self.iters * self.degrees[:,self.m:].std(0) y, p, sy = y[:notail], p[:notail], sy[:notail] c = (y - p)/sy chi2 = np.sum(c*c) return chi2 / (notail - 1) elif stat=='ks': y = y/np.sum(y).astype(np.float) ks = np.random.choice(self.k, size=int(1e6), p=y) ks = np.sort(ks) indic = np.arange(1, len(ks)+1)/float(len(ks)) cdf = np.cumsum(self.p) it = np.bincount(ks.astype(np.int))[self.m:] ix = np.cumsum(it)[:-1] ds = np.absolute(indic[ix-1] - cdf[:int(ks.max()-self.m)]) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.k, cdf, 'k-') ax.set_xlabel('Degree $k$') ax.set_ylabel('Theoretical $CDF$ - Observed $ECDF$') ax.plot(ks, indic, 'r-', label='$ECDF\ d_{KS} = %f$'%(np.around(ds.max(), 5))) ax.legend(loc='lower right') return ds.max() elif stat=='ks2': y = y/np.sum(y).astype(np.float) ks = np.random.choice(self.k, size=int(1e6), p=y) ps = np.random.choice(self.k, size=int(1e6), p=self.p/np.sum(self.p)) ks2 = scs.ks_2samp(ks[:notail], ps[:notail]) return ks2 def process_deg(self): ''' Log bins data and creates theoretical distributions over the log-binned range. 
''' xbs = np.zeros(1) degbs = np.zeros((self.iters, 1)) for i in xrange(self.iters): deg = self.degrees[i] if self.N > deg.size: deg = np.pad(deg, (0, self.N-deg.size), 'constant', constant_values=0) nodes = np.repeat(np.arange(deg.size), deg).astype('int') xb, degb = lb.log_bin(nodes, bin_start=self.m, first_bin_width=1., a=1.25, datatype='int', drop_zeros=True) if xb.size > xbs.size: padding = xb.size - xbs.size xbs = xb degbs = np.pad(degbs, ((0,0),(0,padding)), 'constant', constant_values=0) else: padding = xbs.size - xb.size degb = np.pad(degb, (0,padding), 'constant', constant_values=0) degbs[i] = degb self.xb = xbs self.degb = degbs.mean(0) self.degb_err = degbs.std(0) if self.method==0: self.pb = 2.*self.m*(self.m+1)/(xbs*(xbs+1)*(xbs+2)) elif self.method==100: self.pb = 1./(1+self.m) * np.power(self.m/(1.+self.m), xbs-self.m) def plot_coefs(self, Ns=[1e2, 1e3, 1e4, 1e5, 1e6, 1e7], ms=[1,3,5,10]): ''' Plots coefficient of k_max vs N against m. ''' fig = plt.figure() ax = fig.add_subplot(111) ms = np.array(ms) coefs = np.zeros(len(ms), dtype='float') coefs_err = np.zeros(len(ms), dtype='float') for i in xrange(len(ms)): print 'm =', ms[i], 'loading..' self.setm(ms[i]) kavg = np.zeros(len(Ns), dtype='float') kerr = np.zeros(len(Ns), dtype='float') for j in xrange(len(Ns)): self.setN(Ns[j]) self.run_graph() kavg[j] = self.kmax.mean() kerr[j] = self.kmax.std() f = lambda x, m, b: m*x+b if self.method==0: z, cov = sco.curve_fit(f, np.log(Ns), np.log(kavg), sigma=kerr/kavg, absolute_sigma=True) coefs[i] = np.exp(z[1]) coefs_err[i] = coefs[i]*np.sqrt(cov[1][1]) elif self.method==100: z, cov = sco.curve_fit(f, np.log(Ns), kavg, sigma=kerr, absolute_sigma=True) y = 1./np.log(1+1./ms) coefs[i] = z[0] coefs_err[i] = np.sqrt(cov[0][0]) if self.method==0: y = np.sqrt(ms*(ms+1.)) if self.method==100: y = 1./np.log(1+1./ms) ax.errorbar(ms, coefs, yerr=coefs_err, fmt='bo', label='Observed') ax.plot(ms, y, 'r--', label='Theory') ax.set_xlabel('Edges added per step $m$') ax.set_ylabel('$k_{max}\ coefficient\ C$') ax.legend(loc='upper left') def plot_ms(self, ms=[1,3,5,10]): ''' Plots degree distribution for different m. ''' fig = plt.figure() ax = fig.add_subplot(111) c = cm.rainbow(np.linspace(0, 1, len(ms))) for i in xrange(len(ms)): print 'm =', ms[i], 'loading..' self.setm(ms[i]) self.run_graph() self.process_deg() xb, degb, degb_err = self.xb, self.degb, self.degb_err if len(ms)==1: clr = 'r' deg = self.degrees.sum(0) / float(self.degrees.sum()) ax.loglog(self.k, deg[self.m:], 'b.') else: clr = c[i] errax = ax.errorbar(xb, degb, yerr=degb_err, color=clr, fmt='-', lw=1.5, capsize=2, errorevery=1, label='$m = {0}$'.format(ms[i])) errax[-1][0].set_linestyle('-') errax[-1][0].set_linewidth(0.8) ax.loglog(self.k, self.p, 'k--', lw=1.2) ax.set_xlabel('Degree $k$') ax.set_ylabel('Degree distribution $p(k)$') ax.legend(loc='lower left') def plot_Ns(self, Ns=[1e2, 1e3, 1e4, 1e5, 1e6, 1e7]): ''' Plots distribution for different N, k_max against N and collapses data. ''' Ns = np.array(Ns, dtype='float') fig = plt.figure() ax = fig.add_subplot(111) figkn = plt.figure() axkn = figkn.add_subplot(111) figcol = plt.figure() axcol = figcol.add_subplot(111) c = cm.rainbow(np.linspace(0, 1, len(Ns))) kavg = np.zeros(len(Ns), dtype='float') kerr = np.zeros(len(Ns), dtype='float') for i in xrange(len(Ns)): print 'N =', int(np.log10(Ns[i])), 'loading..' 
self.setN(Ns[i]) self.run_graph() kavg[i] = self.kmax.mean() kerr[i] = self.kmax.std() self.process_deg() xb, degb, degb_err = self.xb, self.degb, self.degb_err errax = ax.errorbar(xb, degb, yerr=degb_err, color=c[i], fmt='-', lw=1.5, capsize=2, errorevery=1, label='$10^{0}$'.format(int(np.log10(Ns[i])))) errax[-1][0].set_linestyle('-') errax[-1][0].set_linewidth(0.8) ax.loglog(self.k, self.p, 'k--') axcol.loglog(xb/kavg[i], degb/self.pb, color=c[i], label='$10^{0}$'.format(int(np.log10(Ns[i])))) ax.set_xlabel('Degree $k$') ax.set_ylabel('Degree distribution $p(k)$') ax.legend(loc='upper right', ncol=1, prop={'size':20}) axcol.set_xlabel('Scaled degree $k/k_{max}$') axcol.set_ylabel('Scaled distribution $p(k)/P(k)$') axcol.axhline(y=1, c='k', ls='--') axcol.legend(loc='lower left', ncol=2) f = lambda x, m, b: m*x+b if self.method==0: z, cov = sco.curve_fit(f, np.log(Ns), np.log(kavg), sigma=kerr/kavg, absolute_sigma=True) y = np.exp(z[1])*Ns**z[0] elif self.method==100: z, cov = sco.curve_fit(f, np.log(Ns), kavg, sigma=kerr, absolute_sigma=True) y = z[1] + z[0]*np.log(Ns) axkn.errorbar(Ns, kavg, yerr=kerr, fmt='bo') axkn.plot(Ns, y, 'r--') axkn.set_xlabel('System size $N$') axkn.set_ylabel('Maximum degree $k_{max}$') return z[0], z[1], cov def plot_ls(self, els=[0, 1, 2, 5, 10]): ''' Performs analysis on different lengths of random walker, producing relevant plots. ''' els = np.array(els) fig = plt.figure() ax = fig.add_subplot(111) figsl = plt.figure() axsl = figsl.add_subplot(111) c = cm.rainbow(np.linspace(0, 1, len(els))) zs = np.zeros(len(els), dtype='float') for i in xrange(len(els)): print 'el =', els[i], 'loading..' self.setmethod(100+els[i]) self.run_graph() self.process_deg() xb, degb = self.xb, self.degb ax.loglog(xb, degb, '-', color=c[i], lw=1.5, label='$\ell = {0}$'.format(els[i])) z = np.polyfit(np.log(xb), np.log(degb), 1) zs[i] = z[0] for method in [0, 100]: self.setmethod(method) self.setkp() ax.loglog(self.k, self.p, 'k--') ax.set_xlabel('Degree $k$') ax.set_ylabel('Degree distribution $p(k)$') ax.legend(loc='upper right', ncol=2, prop={'size':20}) if self.m==1: axsl.plot(els[els%2==0], zs[els%2==0], 'x-.', c[0], lw=1.5, markersize=9) axsl.plot(els[els%2==1], zs[els%2==1], 'x-.', c[-1], lw=1.5, markersize=9) else: axsl.plot(els, zs, 'rx-.', lw=1.5, markersize=9) axsl.axhline(y=-3, c='k', ls='--', lw=1.) axsl.set_xlabel('Walk length $\ell$') axsl.set_ylabel('Degree distribution slope $\gamma$')
mit
raghavrv/scikit-learn
examples/linear_model/plot_iris_logistic.py
119
1679
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Logistic Regression 3-class Classifier ========================================================= Show below is a logistic-regression classifiers decision boundaries on the `iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints are colored according to their labels. """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. Y = iris.target h = .02 # step size in the mesh logreg = linear_model.LogisticRegression(C=1e5) # we create an instance of Neighbours Classifier and fit the data. logreg.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1, figsize=(4, 3)) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.show()
bsd-3-clause
moosekaka/sweepython
mombud/functions/vtk_mbplots.py
1
11238
# -*- coding: utf-8 -*- """ Created on Sat Jun 11 16:10:16 2016 Module for plots of analysis of mother bud function in budding yeast """ import os import os.path as op import inspect import traceback import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from wrappers import FalseException, UsageError # pylint: disable=C0103 plt.rcParams['font.family'] = 'DejaVu Sans' plt.close('all') error_text = ("labelhandler() called with type {}, which does not exist for" " this handler. Defaulting to unlabelled title for subplot") def get_group_counts(data, **kwargs): """ Get counts of groups based on the first columns name or a groupkey """ try: groupkey = kwargs.get('group_key', data.columns[0]) n_counts = (data .groupby([groupkey, 'variable']) .size() .xs(data['variable'] .iloc[0], level='variable')) except KeyError: try: dfcopy = data.reset_index() c1, c2, _, c4, _ = dfcopy.columns n_counts = (pd.pivot_table( dfcopy, c1, index=c2, columns=c4, aggfunc=len)).iloc[:, 0] except ValueError: traceback.print_stack(limit=4) print "Not enough columns to do group counts, skipping" return n_counts class labelhandler(object): """ Callable object for labeling subplot titles""" def __init__(self, htype='normal'): self._htype = htype self._fundict = {'normal': 'fun1', 'facet': 'fun2', 'rowfacet': 'fun2', 'rsqr': 'fun2'} def __call__(self, handle_to_ax, label_dict): try: functype = self._fundict[self._htype] getattr(self, functype)(handle_to_ax, label_dict) except KeyError as e: print error_text.format(e) def fun1(self, handle, labeldic): """ for normal plots, i.e. 'unfacetted' """ assert self._htype == 'normal' # sanity check here labels = [xtik.get_text().strip() for xtik in handle.axes.get_xticklabels()] new_labels = [u'{}\n N={}' .format(old_lab, labeldic[old_lab]) for old_lab in labels] handle.axes.set_xticklabels(new_labels) def fun2(self, handle, labeldic): """ for facetted plots """ for ax in handle.axes.flat: oldtitle = ax.get_title() if self._htype == 'rowfacet': # multi-row facetted plots" media = (oldtitle # carbon/media type label .split('|')[0] .split('=')[1] .strip()) budv = float(oldtitle # budvol label .split('=')[-1] .strip()) newtitle = (u'{}, N = {}' .format(media, labeldic.xs(media).get([budv])[0])) ax.set_title(newtitle) else: oldtitle = oldtitle.split('=')[1].strip() if self._htype != 'rsqr': # defaults for "facet" ax.set_title(u'{}, N={}' .format(oldtitle, labeldic[oldtitle])) else: ax.set_title(u'{}, R^2={:5.3f}' # adds R^2 corr. labels" .format(oldtitle, labeldic[oldtitle])) class plviol(object): """ Wrapper class for generating violinplots """ # class/wrapper level attr. def_init_kwargs = ('col_order', 'default_ylims', 'labeller') def __init__(self, plt_type='violinplot', nofig=False, **kwargs): """ Store a ref to a seaborn plot type method in `pltobj`. `nofig` toggle to disable subplot in case of FacetGrid. 
""" self.pltobj = getattr(sns, plt_type) if not nofig: self.fig, self.ax = plt.subplots(1, 1) for name in type(self).def_init_kwargs: try: setattr(self, name, kwargs[name]) except KeyError: continue self.data = None self.x = None self.y = None self.n_counts = None self.ylim = None def get_ylims(self, df): """ get the ylims for the long form data set """ try: assert self.default_ylims y_lims = (df.iloc[:, -1] .dropna() .quantile(self.default_ylims) .tolist()) except AttributeError: print "default_ylims not specified, returning minmax instead" y_lims = (df.iloc[:, -1].min(), df.iloc[:, -1].max()) return tuple(y_lims) def get_group_counts(self, **kwargs): """ Get counts of groups based on the first columns name or a groupkey """ try: groupkey = kwargs.get('group_key', self.data.columns[0]) self.n_counts = (self.data .groupby([groupkey, 'variable']) .size() .xs(self.data['variable'] .iloc[0], level='variable')) except KeyError: try: dfcopy = self.data.reset_index() c1, c2, _, c4, _ = dfcopy.columns self.n_counts = (pd.pivot_table( dfcopy, c1, index=c2, columns=c4, aggfunc=len)).iloc[:, 0] except ValueError: traceback.print_stack(limit=4) print "Not enough columns to do group counts, skipping" def call_sns_plotter(self, order, ylims, **kwargs): """ Draws the axes obj using the seaborn method self.pltobj """ allowable = inspect.getargspec(self.pltobj).args kws = {k: v for k, v in kwargs.iteritems() if k in allowable} h = self.pltobj(data=self.data, ax=self.ax, order=order, **kws) try: h.set_ylim(ylims[0], ylims[1]) h.set_title(kwargs.get('title')) except TypeError: pass try: h.set(**kwargs['setargs']) except KeyError: pass def label_group_counts(self, obj): try: self.labeller(obj, self.n_counts) except (AttributeError, TypeError): print "Skipping labels of categorical vars \n" def plt(self, data=None, ylim=None, **kwargs): """ Parameters ---------- data : DataFrame data for plot input ylim : [None | 'auto' | tuple] `ylim` is an axes level parameter to manually set the y-limits,\ as the axes level seaborn plot functions do not have a `ylim`\ parameter, unlike FacetGrid. 
If set to `auto`, ylim is calculated according to a default_ylims parameter during object instantiation """ self.data = data try: if self.data is None: raise FalseException except FalseException: traceback.print_stack(limit=4) raise UsageError("Must provide 'data' arg") # get the y-axis limits if ylim is not None: if ylim == 'auto': self.ylim = self.get_ylims(self.data) else: self.ylim = ylim # get the counts for the categorical vars self.get_group_counts(**kwargs) # if a subset of data is chosen, make col_order a subset too try: col_order_sub = [i for i in self.col_order if i in self.n_counts] except (AttributeError, TypeError): col_order_sub = None # tries to plot using the relevant seaborn plot type, except if this # instance's plt() is overriden try: self.call_sns_plotter(order=col_order_sub, ylims=self.ylim, **kwargs) except AttributeError: pass else: self.label_group_counts(self.ax) def save_figure(self, savepath=None): """ saves figure to path """ if savepath is None: savepath = op.join(os.getcwd(), '_fig.png') self.fig.savefig(savepath) def turn_off_legend(self): """ toggle to remove legend """ try: self.ax.legend_.remove() except AttributeError: pass class plbox(plviol): """ subclass of plviol to have boxplot mean instead of violinplot medians """ def plt(self, **kwargs): """ extends plviol.plt to have boxplot mean markers """ # plt.boxplot doesnt have kwargs, hence need to check kws allowed allowable = inspect.getargspec(sns.boxplot).args kws = {k: v for k, v in kwargs.iteritems() if k in allowable} sns.boxplot(ax=self.ax, showmeans=True, showbox=False, showcaps=False, showfliers=False, order=self.col_order, medianprops={'linewidth': 0}, whiskerprops={'linewidth': 0}, meanprops={'marker': '_', 'c': 'w', 'ms': 5, 'markeredgewidth': 2}, **kws) # plt this after boxplot so that labelling macros are preserved super(plbox, self).plt(**kwargs) class plfacet(plviol): """ extends plviol to those with facet objects """ def __init__(self, **kwargs): # nofig: ensure that the base class does not call plt.figure super(plfacet, self).__init__(nofig=True, **kwargs) self.facet_obj = sns.FacetGrid self.ylim = None def plt(self, data=None, mapargs=None, **kwargs): super(plfacet, self).plt(data=data, **kwargs) # consumes ylim # add. args for FacetGrid.set() method setargs = kwargs.pop('setargs', None) allowable = inspect.getargspec(sns.FacetGrid.__init__).args kws = {k: v for k, v in kwargs.iteritems() if k in allowable} kws.update({'ylim': self.ylim}) self.facet_obj = sns.FacetGrid(data, **kws) try: self.facet_obj.map(self.pltobj, *mapargs).set(**setargs) except TypeError: try: self.facet_obj.map(self.pltobj, *mapargs) except: raise # traceback.print_stack(limit=4) # raise UsageError("Missing kwargs `mapargs`") self.label_group_counts(self.facet_obj) def save_figure(self, savepath=None): if savepath is None: savepath = op.join(os.getcwd(), '_fig.png') self.facet_obj.savefig(savepath)
mit
andycasey/narrowband-fourier-filter
gui_core.py
1
1568
# coding: utf-8 """ Matplotlib figure functionality for the Traits GUI. """ from __future__ import division, print_function __author__ = "Andy Casey <[email protected]>" # Third-party import wx from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg from matplotlib.backends.backend_wx import NavigationToolbar2Wx from matplotlib.pyplot import Figure, subplot from matplotlib.ticker import MaxNLocator # Editor factories for matplotlib figures from traitsui.wx.editor import Editor from traitsui.wx.basic_editor_factory import BasicEditorFactory class _MPLFigureEditor(Editor): """ Editor class for containing a matplotlib figure within a Traits GUI. """ scrollable = True def init(self, parent): self.control = self._create_canvas(parent) self.set_tooltip() def update_editor(self): pass def _create_canvas(self, parent): """ Create the matplotlib canvas """ panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN) sizer = wx.BoxSizer(wx.VERTICAL) panel.SetSizer(sizer) mpl_control = FigureCanvasWxAgg(panel, -1, self.value) sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW) toolbar = NavigationToolbar2Wx(mpl_control) sizer.Add(toolbar, 0, wx.EXPAND) self.value.canvas.SetMinSize((10,10)) return panel class MPLFigureEditor(BasicEditorFactory): """ Factory class for generating editors that contain matplotlib figures and can be placed within a Traits GUI. """ klass = _MPLFigureEditor
mit
dgwakeman/mne-python
mne/decoding/tests/test_ems.py
19
1969
# Author: Denis A. Engemann <[email protected]> # # License: BSD (3-clause) import os.path as op from nose.tools import assert_equal, assert_raises from mne import io, Epochs, read_events, pick_types from mne.utils import requires_sklearn from mne.decoding import compute_ems data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') curdir = op.join(op.dirname(__file__)) raw_fname = op.join(data_dir, 'test_raw.fif') event_name = op.join(data_dir, 'test-eve.fif') tmin, tmax = -0.2, 0.5 event_id = dict(aud_l=1, vis_l=3) @requires_sklearn def test_ems(): """Test event-matched spatial filters""" raw = io.Raw(raw_fname, preload=False) # create unequal number of events events = read_events(event_name) events[-2, 2] = 3 picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') picks = picks[1:13:3] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l']) epochs.equalize_event_counts(epochs.event_id, copy=False) assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah']) surrogates, filters, conditions = compute_ems(epochs) assert_equal(list(set(conditions)), [1, 3]) events = read_events(event_name) event_id2 = dict(aud_l=1, aud_r=2, vis_l=3) epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) epochs.equalize_event_counts(epochs.event_id, copy=False) n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']]) assert_raises(ValueError, compute_ems, epochs) surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l']) assert_equal(n_expected, len(surrogates)) assert_equal(n_expected, len(conditions)) assert_equal(list(set(conditions)), [2, 3]) raw.close()
bsd-3-clause
gfyoung/pandas
pandas/tests/extension/base/groupby.py
1
4033
import pytest import pandas as pd import pandas._testing as tm from .base import BaseExtensionTests class BaseGroupbyTests(BaseExtensionTests): """Groupby-specific tests.""" def test_grouping_grouper(self, data_for_grouping): df = pd.DataFrame( {"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping} ) gr1 = df.groupby("A").grouper.groupings[0] gr2 = df.groupby("B").grouper.groupings[0] tm.assert_numpy_array_equal(gr1.grouper, df.A.values) tm.assert_extension_array_equal(gr2.grouper, data_for_grouping) @pytest.mark.parametrize("as_index", [True, False]) def test_groupby_extension_agg(self, as_index, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) result = df.groupby("B", as_index=as_index).A.mean() _, index = pd.factorize(data_for_grouping, sort=True) index = pd.Index(index, name="B") expected = pd.Series([3, 1, 4], index=index, name="A") if as_index: self.assert_series_equal(result, expected) else: expected = expected.reset_index() self.assert_frame_equal(result, expected) def test_groupby_agg_extension(self, data_for_grouping): # GH#38980 groupby agg on extension type fails for non-numeric types df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) expected = df.iloc[[0, 2, 4, 7]] expected = expected.set_index("A") result = df.groupby("A").agg({"B": "first"}) self.assert_frame_equal(result, expected) result = df.groupby("A").agg("first") self.assert_frame_equal(result, expected) result = df.groupby("A").first() self.assert_frame_equal(result, expected) def test_groupby_extension_no_sort(self, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) result = df.groupby("B", sort=False).A.mean() _, index = pd.factorize(data_for_grouping, sort=False) index = pd.Index(index, name="B") expected = pd.Series([1, 3, 4], index=index, name="A") self.assert_series_equal(result, expected) def test_groupby_extension_transform(self, data_for_grouping): valid = data_for_grouping[~data_for_grouping.isna()] df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid}) result = df.groupby("B").A.transform(len) expected = pd.Series([3, 3, 2, 2, 3, 1], name="A") self.assert_series_equal(result, expected) def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) df.groupby("B").apply(groupby_apply_op) df.groupby("B").A.apply(groupby_apply_op) df.groupby("A").apply(groupby_apply_op) df.groupby("A").B.apply(groupby_apply_op) def test_groupby_apply_identity(self, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) result = df.groupby("A").B.apply(lambda x: x.array) expected = pd.Series( [ df.B.iloc[[0, 1, 6]].array, df.B.iloc[[2, 3]].array, df.B.iloc[[4, 5]].array, df.B.iloc[[7]].array, ], index=pd.Index([1, 2, 3, 4], name="A"), name="B", ) self.assert_series_equal(result, expected) def test_in_numeric_groupby(self, data_for_grouping): df = pd.DataFrame( { "A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping, "C": [1, 1, 1, 1, 1, 1, 1, 1], } ) result = df.groupby("A").sum().columns if data_for_grouping.dtype._is_numeric: expected = pd.Index(["B", "C"]) else: expected = pd.Index(["C"]) tm.assert_index_equal(result, expected)
bsd-3-clause
gotomypc/Passage
examples/newsgroup.py
2
1787
from sklearn.datasets import fetch_20newsgroups categories = ['alt.atheism', 'sci.space'] newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'), categories=categories) newsgroups_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'), categories=categories) print len(newsgroups_train.data), len(newsgroups_test.data) from sklearn import metrics from passage.preprocessing import Tokenizer from passage.layers import Embedding, GatedRecurrent, Dense from passage.models import RNN from passage.utils import save tokenizer = Tokenizer(min_df=10, max_features=50000) X_train = tokenizer.fit_transform(newsgroups_train.data) X_test = tokenizer.transform(newsgroups_test.data) Y_train = newsgroups_train.target Y_test = newsgroups_test.target print tokenizer.n_features layers = [ Embedding(size=128, n_features=tokenizer.n_features), GatedRecurrent(size=256, activation='tanh', gate_activation='steeper_sigmoid', init='orthogonal', seq_output=False), Dense(size=1, activation='sigmoid', init='orthogonal') # sigmoid for binary classification ] model = RNN(layers=layers, cost='bce') # bce is classification loss for binary classification and sigmoid output for i in range(2): model.fit(X_train, Y_train, n_epochs=1) tr_preds = model.predict(X_train[:len(Y_test)]) te_preds = model.predict(X_test) tr_acc = metrics.accuracy_score(Y_train[:len(Y_test)], tr_preds > 0.5) te_acc = metrics.accuracy_score(Y_test, te_preds > 0.5) print i, tr_acc, te_acc # dataset too small to fully utilize Passage save(model, 'model.pkl')
mit
mne-tools/mne-python
mne/viz/tests/test_circle.py
14
5024
# Authors: Alexandre Gramfort <[email protected]> # Denis Engemann <[email protected]> # Martin Luessi <[email protected]> # # License: Simplified BSD import numpy as np import pytest import matplotlib.pyplot as plt from mne.viz import plot_connectivity_circle, circular_layout def test_plot_connectivity_circle(): """Test plotting connectivity circle.""" node_order = ['frontalpole-lh', 'parsorbitalis-lh', 'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh', 'medialorbitofrontal-lh', 'parstriangularis-lh', 'rostralanteriorcingulate-lh', 'temporalpole-lh', 'parsopercularis-lh', 'caudalanteriorcingulate-lh', 'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh', 'caudalmiddlefrontal-lh', 'superiortemporal-lh', 'parahippocampal-lh', 'middletemporal-lh', 'inferiortemporal-lh', 'precentral-lh', 'transversetemporal-lh', 'posteriorcingulate-lh', 'fusiform-lh', 'postcentral-lh', 'bankssts-lh', 'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh', 'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh', 'superiorparietal-lh', 'pericalcarine-lh', 'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh', 'lateraloccipital-rh', 'pericalcarine-rh', 'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh', 'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh', 'supramarginal-rh', 'bankssts-rh', 'postcentral-rh', 'fusiform-rh', 'posteriorcingulate-rh', 'transversetemporal-rh', 'precentral-rh', 'inferiortemporal-rh', 'middletemporal-rh', 'parahippocampal-rh', 'superiortemporal-rh', 'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh', 'entorhinal-rh', 'caudalanteriorcingulate-rh', 'parsopercularis-rh', 'temporalpole-rh', 'rostralanteriorcingulate-rh', 'parstriangularis-rh', 'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh', 'lateralorbitofrontal-rh', 'parsorbitalis-rh', 'frontalpole-rh'] label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh', 'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh', 'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh', 'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh', 'frontalpole-rh', 'fusiform-lh', 'fusiform-rh', 'inferiorparietal-lh', 'inferiorparietal-rh', 'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh', 'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh', 'lateraloccipital-lh', 'lateraloccipital-rh', 'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh', 'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh', 'medialorbitofrontal-rh', 'middletemporal-lh', 'middletemporal-rh', 'paracentral-lh', 'paracentral-rh', 'parahippocampal-lh', 'parahippocampal-rh', 'parsopercularis-lh', 'parsopercularis-rh', 'parsorbitalis-lh', 'parsorbitalis-rh', 'parstriangularis-lh', 'parstriangularis-rh', 'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh', 'postcentral-rh', 'posteriorcingulate-lh', 'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh', 'precuneus-lh', 'precuneus-rh', 'rostralanteriorcingulate-lh', 'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh', 'rostralmiddlefrontal-rh', 'superiorfrontal-lh', 'superiorfrontal-rh', 'superiorparietal-lh', 'superiorparietal-rh', 'superiortemporal-lh', 'superiortemporal-rh', 'supramarginal-lh', 'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh', 'transversetemporal-lh', 'transversetemporal-rh'] group_boundaries = [0, len(label_names) / 2] node_angles = circular_layout(label_names, node_order, start_pos=90, group_boundaries=group_boundaries) con = np.random.RandomState(0).randn(68, 68) plot_connectivity_circle(con, label_names, n_lines=300, node_angles=node_angles, title='test', ) 
pytest.raises(ValueError, circular_layout, label_names, node_order, group_boundaries=[-1]) pytest.raises(ValueError, circular_layout, label_names, node_order, group_boundaries=[20, 0]) plt.close('all')
bsd-3-clause
scholer/nascent
examples/duplex_div1vs2/duplex_div1vs2_analysis.py
2
6184
#!/usr/bin/env python # -*- coding: utf-8 -*- ## Copyright 2015 Rasmus Scholer Sorensen, [email protected] ## ## This program is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see <http://www.gnu.org/licenses/>. # pylint: disable=W0142,C0103,C0301,W0141 """ Run a single analysis of a single duplex. Plotting libraries (2D plotting): * matplotlib * seaborn and pandas both uses matplotlib as backend. * ggplot (R-like plotting) * bokeh (targets webbrowser visualization) * pygal - svg-based plotting, simpler and more pythonic than matplotlib but less features. * Chaco - interactive plotting wtih Qt * plotly - API to online plotting/visualization service. * GR - OpenGL visualization, can be used as a matplotlib backend with improved performance. * Vispy - High-performance GPU 2D/3D OpenGL visualization. Can be used as an experimental matplotlib backend. ** Vispy is a combined project of four initial projects, pyqtgraph, visvis, Galry, and Glumpy. Plotting refs: * http://pbpython.com/visualization-tools-1.html """ import os import sys import webbrowser # Run from package home dir or have nascent on your python path: sys.path.insert(0, ".") print("os.path.abspath('.'):", os.path.abspath('.')) scriptdir = os.path.dirname(os.path.abspath(__file__)) examples_dir = os.path.dirname(scriptdir) LIBPATH = os.path.dirname(examples_dir) try: import nascent except ImportError: sys.path.insert(0, LIBPATH) import nascent from nascent.stat_analysis.plotting import load_pyplot, plot_tot_vs_time from nascent.stat_analysis.processing import load_multiple_stats #, process_stats from nascent.stat_analysis.meltingcurve import plot_thermodynamic_meltingcurve def main(): plot_tot_hyb = True plot_tot_stacked = False plot_melting_curve = False # stats, statsfolders = load_stats() structure_runidxs = {"duplex_16bp-d1": [-1], "duplex_16bp-d2": [-1]} line_colors = dict(zip(structure_runidxs.keys(), 'rgbcmk'[:len(structure_runidxs)])) structure_stats = {} for structure, runidxs in structure_runidxs.items(): structure_stats[structure] = dict(zip(('stats', 'statsfolders'), load_multiple_stats(runidxs=runidxs, basedir=scriptdir, structure=structure, process=True)), runidxs=runidxs) structure_stats[structure]['color'] = line_colors[structure] # structure_stats[structure] = {'runidxs': runidxs} # structure_stats = {structure: # for structure, runidxs in structure_runidxs.items()} # # structure_stats = {structure: load_multiple_stats(runidxs=runidxs, basedir=scriptdir, # structure=structure, process=True) # for structure, runidxs in structure_runidxs.items()} #stats, statsfolders = load_multiple_stats(runidxs=runidxs, basedir=scriptdir, process=True) #statsfolder = statsfolders[0] ## Process (returns a Pandas DataFrame): # shift_tau_for_duration is for older stats where stats were collected *after* changing state. # stats = [process_stats(runstats) for runstats in stats] # Edit: Done in load_multiple_stats through process=True. 
pyplot = load_pyplot() ## Plot fraction of hybridized domains: if plot_tot_hyb: ax = None # Instead of passing plot parameters through via function args, consider using the # pd.plot_params.use context manager. EDIT: Currently only works for xaxis.compat option :\ # with pd.plot_params.use('logx', False): # "Specific lines can be excluded from the automatic legend element selection by defining a label # starting with an underscore." for structure, data in structure_stats.items(): #runidxs = structure_runidxs[structure] runidxs = data['runidxs'] stats = data['stats'] color = data['color'] for runstats, runidx in zip(stats, runidxs): #plotfilename = os.path.join(statsfolder, "f_hybridized_domains_avg_vs_time.png") ax = plot_tot_vs_time(runstats, filename=None, #plotfilename, ax=ax, linestyles=":-", colors=color*2, labels=("_", "%s run %s" % (structure, runidx)), legend=False ) # import pdb # pdb.set_trace() handles, labels = zip(*[(hdl, lbl) for hdl, lbl in zip(*ax.get_legend_handles_labels()) if "avg" not in lbl and lbl[0] != "_"]) pyplot.legend(handles=handles, labels=labels, loc="lower right") plotfilename = os.path.join(scriptdir, "f_hybridized_domains_avg_vs_time.png") pyplot.savefig(plotfilename) webbrowser.open(plotfilename) ## Plot stacked ends: if plot_tot_stacked: plotfilename = os.path.join(statsfolder, "f_stacked_ends_avg_vs_time.png") ax = plot_tot_vs_time(stats, fields=('f_stacked_ends_avg',), add_average=False, filename=plotfilename) webbrowser.open(plotfilename) ## Plot melting curve? if plot_melting_curve: meltingcurvefn = "thermo_melting.yaml" meltingcurvefn = os.path.join(statsfolder, meltingcurvefn) print("Plotting melting curve from file:", meltingcurvefn) plot_thermodynamic_meltingcurve(meltingcurvefn, KtoC=False) if __name__ == "__main__": main()
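The legend handling in main() above leans on a matplotlib convention that is easy to miss: labels starting with an underscore are excluded from the automatic legend, and the remaining handles can still be filtered by hand before calling legend(). A minimal standalone sketch of that pattern, independent of the nascent plotting helpers (which are assumed to return ordinary matplotlib axes):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], "r:", label="_duplex_16bp-d1 raw")    # underscore label: hidden from the auto legend
ax.plot([0, 1, 2], [0, 1, 4], "r-", label="duplex_16bp-d1 run -1")
ax.plot([0, 1, 2], [1, 2, 3], "b-", label="duplex_16bp-d2 avg")     # shown by default, filtered out below

# Mirror the filtering done in main(): drop averaged traces and underscore-prefixed labels
handles, labels = zip(*[(hdl, lbl)
                        for hdl, lbl in zip(*ax.get_legend_handles_labels())
                        if "avg" not in lbl and lbl[0] != "_"])
ax.legend(handles=handles, labels=labels, loc="lower right")
plt.show()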
agpl-3.0
SuperSaiyanSSS/SinaWeiboSpider
ml/random_forest.py
1
6285
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import division
import pandas as pd
import json
import math
import csv
import pymongo
import sklearn
import sys
sys.path.append("..")
from a1 import base
from a1 import sina_store

reload(sys)
sys.setdefaultencoding('utf-8')


class MachineLearning(base.SinaBaseObject):
    def __init__(self):
        self.is_First = True
        self.is_First_2 = True
        self.gbc = ''
        self.dtc = ''
        self.rfc = ''

    def set_feature_vector_dict(self, feature_vector_dict):
        self.clean_feature_vector_dict(feature_vector_dict, is_first=self.is_First)
        self.is_First = False

    def set_test_feature_vector_dict(self, feature_vector_dict):
        self.clean_test_feature_vector_dict(feature_vector_dict, is_first_2=self.is_First_2)
        self.is_First_2 = False

    # Append the incoming feature dict to the training CSV file
    @staticmethod
    def clean_feature_vector_dict(feature_vector_dict, is_first=False):
        with open('names.csv', 'ab') as csvfile:
            fieldnames = ['uid', 'similarity', 'platform', 'reputation', 'entropy', 'human_or_machine']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if is_first:
                writer.writeheader()
            writer.writerow(
                {'uid': feature_vector_dict['uid'],
                 'similarity': feature_vector_dict['similarity'],
                 'platform': feature_vector_dict['platform'],
                 'reputation': feature_vector_dict['reputation'],
                 'entropy': feature_vector_dict['entropy'],
                 'human_or_machine': feature_vector_dict['human_or_machine']
                 }
            )

    # Append the incoming feature dict to the test CSV file
    @staticmethod
    def clean_test_feature_vector_dict(feature_vector_dict, is_first_2=False):
        with open('needs.csv', 'ab') as csvfile:
            fieldnames = ['uid', 'similarity', 'platform', 'reputation', 'entropy', 'human_or_machine']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if is_first_2:
                writer.writeheader()
            writer.writerow(
                {'uid': feature_vector_dict['uid'],
                 'similarity': feature_vector_dict['similarity'],
                 'platform': feature_vector_dict['platform'],
                 'reputation': feature_vector_dict['reputation'],
                 'entropy': feature_vector_dict['entropy'],
                 'human_or_machine': feature_vector_dict['human_or_machine']
                 }
            )

    # Train and evaluate a single decision tree, a random forest and a gradient boosting model
    def rand_forest_train(self):
        # Read the locally stored user features
        users = pd.read_csv('names.csv')

        # Use similarity, platform, reputation and entropy as the features
        # for telling humans and machines apart
        X = users[['similarity', 'platform', 'reputation', 'entropy']]
        y = users['human_or_machine']

        # Split the raw data; 25% is held out for testing
        from sklearn.cross_validation import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

        # Turn the categorical features into feature vectors
        from sklearn.feature_extraction import DictVectorizer
        vec = DictVectorizer(sparse=False)
        X_train = vec.fit_transform(X_train.to_dict(orient='records'))
        X_test = vec.transform(X_test.to_dict(orient='records'))

        # Train and predict with a single decision tree
        from sklearn.tree import DecisionTreeClassifier
        dtc = DecisionTreeClassifier()
        dtc.fit(X_train, y_train)
        dtc_y_pred = dtc.predict(X_test)

        # Train and predict with a random forest classifier
        from sklearn.ensemble import RandomForestClassifier
        rfc = RandomForestClassifier()
        rfc.fit(X_train, y_train)
        rfc_y_pred = rfc.predict(X_test)

        # Train and predict with gradient boosted decision trees
        from sklearn.ensemble import GradientBoostingClassifier
        gbc = GradientBoostingClassifier()
        gbc.fit(X_train, y_train)
        gbc_y_pred = gbc.predict(X_test)

        from sklearn.metrics import classification_report

        # Report the accuracy of the single decision tree on the test set,
        # together with per-class precision, recall and F1
        print("Decision tree accuracy:", dtc.score(X_test, y_test))
        print(classification_report(y_test, dtc_y_pred))

        # Report the accuracy of the random forest on the test set,
        # together with per-class precision, recall and F1
        print("Random forest accuracy:", rfc.score(X_test, y_test))
        print(classification_report(y_test, rfc_y_pred))

        # Report the accuracy of the gradient boosted trees on the test set,
        # together with per-class precision, recall and F1
        print("Gradient boosting accuracy:", gbc.score(X_test, y_test))
        print(classification_report(y_test, gbc_y_pred))

        users = pd.read_csv('values.csv')
        # Classify the unseen accounts as human or machine
        X = users[['similarity', 'platform', 'reputation', 'entropy']]
        X = vec.transform(X.to_dict(orient='records'))
        print(rfc.predict(X))

        self.dtc = dtc
        self.rfc = rfc
        self.gbc = gbc


def get_dict_from_weibo_table():
    ml = MachineLearning()
    sina_store_object = sina_store.SinaStore()

    sina_store_object.weibo_table = sina_store_object.db['human_vector_info']
    iter = sina_store_object.get_stored_information()
    while True:
        try:
            info_dict = next(iter)
            ml.set_feature_vector_dict(info_dict)
        except StopIteration:
            break

    sina_store_object.weibo_table = sina_store_object.db['machine_vector_info']
    iter = sina_store_object.get_stored_information()
    while True:
        try:
            info_dict = next(iter)
            ml.set_feature_vector_dict(info_dict)
        except StopIteration:
            break

    print("Finished loading data, training models...")
    ml.rand_forest_train()


def start_training():
    get_dict_from_weibo_table()


if __name__ == "__main__":
    start_training()
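For context, ``sklearn.cross_validation`` used above was removed in scikit-learn 0.20, and the ``DictVectorizer`` round-trip is only needed when categorical columns are present. The same three-way comparison can be reproduced on a current scikit-learn with synthetic data standing in for names.csv (column names match the script above; this is an illustrative sketch, not part of the spider):

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split      # replaces sklearn.cross_validation
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import classification_report

# Synthetic stand-in for names.csv with the same columns as above
rng = np.random.RandomState(33)
users = pd.DataFrame({
    'similarity': rng.rand(200),
    'platform': rng.randint(0, 3, 200),
    'reputation': rng.rand(200),
    'entropy': rng.rand(200),
    'human_or_machine': rng.randint(0, 2, 200),
})
X = users[['similarity', 'platform', 'reputation', 'entropy']]
y = users['human_or_machine']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

# All features are numeric here, so the DictVectorizer step is skipped
for name, clf in [("decision tree", DecisionTreeClassifier()),
                  ("random forest", RandomForestClassifier()),
                  ("gradient boosting", GradientBoostingClassifier())]:
    clf.fit(X_train, y_train)
    print(name, "accuracy:", clf.score(X_test, y_test))
    print(classification_report(y_test, clf.predict(X_test)))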
mit
altairpearl/scikit-learn
sklearn/model_selection/_search.py
2
44970
""" The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the parameters of an estimator. """ from __future__ import print_function from __future__ import division # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]> # Andreas Mueller <[email protected]> # Olivier Grisel <[email protected]> # License: BSD 3 clause from abc import ABCMeta, abstractmethod from collections import Mapping, namedtuple, Sized, defaultdict from functools import partial, reduce from itertools import product import operator import warnings import numpy as np from ..base import BaseEstimator, is_classifier, clone from ..base import MetaEstimatorMixin from ._split import check_cv from ._validation import _fit_and_score from ..exceptions import NotFittedError from ..externals.joblib import Parallel, delayed from ..externals import six from ..utils import check_random_state from ..utils.fixes import sp_version from ..utils.fixes import rankdata from ..utils.random import sample_without_replacement from ..utils.validation import indexable, check_is_fitted from ..utils.metaestimators import if_delegate_has_method from ..metrics.scorer import check_scoring __all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point', 'ParameterSampler', 'RandomizedSearchCV'] class ParameterGrid(object): """Grid of parameters with a discrete number of values for each. Can be used to iterate over parameter value combinations with the Python built-in function iter. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_grid : dict of string to sequence, or sequence of such The parameter grid to explore, as a dictionary mapping estimator parameters to sequences of allowed values. An empty dict signifies default parameters. A sequence of dicts signifies a sequence of grids to search, and is useful to avoid exploring parameter combinations that make no sense or have no effect. See the examples below. Examples -------- >>> from sklearn.model_selection import ParameterGrid >>> param_grid = {'a': [1, 2], 'b': [True, False]} >>> list(ParameterGrid(param_grid)) == ( ... [{'a': 1, 'b': True}, {'a': 1, 'b': False}, ... {'a': 2, 'b': True}, {'a': 2, 'b': False}]) True >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}] >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'}, ... {'kernel': 'rbf', 'gamma': 1}, ... {'kernel': 'rbf', 'gamma': 10}] True >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1} True See also -------- :class:`GridSearchCV`: Uses :class:`ParameterGrid` to perform a full parallelized parameter search. """ def __init__(self, param_grid): if isinstance(param_grid, Mapping): # wrap dictionary in a singleton list to support either dict # or list of dicts param_grid = [param_grid] self.param_grid = param_grid def __iter__(self): """Iterate over the points in the grid. Returns ------- params : iterator over dict of string to any Yields dictionaries mapping each estimator parameter to one of its allowed values. """ for p in self.param_grid: # Always sort the keys of a dictionary, for reproducibility items = sorted(p.items()) if not items: yield {} else: keys, values = zip(*items) for v in product(*values): params = dict(zip(keys, v)) yield params def __len__(self): """Number of points on the grid.""" # Product function that can handle iterables (np.product can't). 
product = partial(reduce, operator.mul) return sum(product(len(v) for v in p.values()) if p else 1 for p in self.param_grid) def __getitem__(self, ind): """Get the parameters that would be ``ind``th in iteration Parameters ---------- ind : int The iteration index Returns ------- params : dict of string to any Equal to list(self)[ind] """ # This is used to make discrete sampling without replacement memory # efficient. for sub_grid in self.param_grid: # XXX: could memoize information used here if not sub_grid: if ind == 0: return {} else: ind -= 1 continue # Reverse so most frequent cycling parameter comes first keys, values_lists = zip(*sorted(sub_grid.items())[::-1]) sizes = [len(v_list) for v_list in values_lists] total = np.product(sizes) if ind >= total: # Try the next grid ind -= total else: out = {} for key, v_list, n in zip(keys, values_lists, sizes): ind, offset = divmod(ind, n) out[key] = v_list[offset] return out raise IndexError('ParameterGrid index out of range') class ParameterSampler(object): """Generator on parameters sampled from given distributions. Non-deterministic iterable over random candidate combinations for hyper- parameter search. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not accept a custom RNG instance and always use the singleton RNG from ``numpy.random``. Hence setting ``random_state`` will not guarantee a deterministic iteration whenever ``scipy.stats`` distributions are used to define the parameter search space. Deterministic behavior is however guaranteed from SciPy 0.16 onwards. Read more in the :ref:`User Guide <search>`. Parameters ---------- param_distributions : dict Dictionary where the keys are parameters and values are distributions from which a parameter is to be sampled. Distributions either have to provide a ``rvs`` function to sample from them, or can be given as a list of values, where a uniform distribution is assumed. n_iter : integer Number of parameter settings that are produced. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. Returns ------- params : dict of string to any **Yields** dictionaries mapping each estimator parameter to as sampled value. Examples -------- >>> from sklearn.model_selection import ParameterSampler >>> from scipy.stats.distributions import expon >>> import numpy as np >>> np.random.seed(0) >>> param_grid = {'a':[1, 2], 'b': expon()} >>> param_list = list(ParameterSampler(param_grid, n_iter=4)) >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items()) ... for d in param_list] >>> rounded_list == [{'b': 0.89856, 'a': 1}, ... {'b': 0.923223, 'a': 1}, ... {'b': 1.878964, 'a': 2}, ... 
{'b': 1.038159, 'a': 2}] True """ def __init__(self, param_distributions, n_iter, random_state=None): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state def __iter__(self): # check if all distributions are given as lists # in this case we want to sample without replacement all_lists = np.all([not hasattr(v, "rvs") for v in self.param_distributions.values()]) rnd = check_random_state(self.random_state) if all_lists: # look up sampled parameter settings in parameter grid param_grid = ParameterGrid(self.param_distributions) grid_size = len(param_grid) if grid_size < self.n_iter: raise ValueError( "The total space of parameters %d is smaller " "than n_iter=%d. For exhaustive searches, use " "GridSearchCV." % (grid_size, self.n_iter)) for i in sample_without_replacement(grid_size, self.n_iter, random_state=rnd): yield param_grid[i] else: # Always sort the keys of a dictionary, for reproducibility items = sorted(self.param_distributions.items()) for _ in six.moves.range(self.n_iter): params = dict() for k, v in items: if hasattr(v, "rvs"): if sp_version < (0, 16): params[k] = v.rvs() else: params[k] = v.rvs(random_state=rnd) else: params[k] = v[rnd.randint(len(v))] yield params def __len__(self): """Number of points that will be sampled.""" return self.n_iter def fit_grid_point(X, y, estimator, parameters, train, test, scorer, verbose, error_score='raise', **fit_params): """Run fit on one set of parameters. Parameters ---------- X : array-like, sparse matrix or list Input data. y : array-like or None Targets for input data. estimator : estimator object A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. parameters : dict Parameters to be set on estimator for this grid point. train : ndarray, dtype int or bool Boolean mask or indices for training set. test : ndarray, dtype int or bool Boolean mask or indices for test set. scorer : callable or None. If provided must be a scorer callable object / function with signature ``scorer(estimator, X, y)``. verbose : int Verbosity level. **fit_params : kwargs Additional parameter passed to the fit function of the estimator. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Returns ------- score : float Score of this parameter setting on given training / test split. parameters : dict The parameters that have been evaluated. n_samples_test : int Number of test samples in this split. 
""" score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, error_score) return score, parameters, n_samples_test def _check_param_grid(param_grid): if hasattr(param_grid, 'items'): param_grid = [param_grid] for p in param_grid: for name, v in p.items(): if isinstance(v, np.ndarray) and v.ndim > 1: raise ValueError("Parameter array should be one-dimensional.") check = [isinstance(v, k) for k in (list, tuple, np.ndarray)] if True not in check: raise ValueError("Parameter values for parameter ({0}) need " "to be a sequence.".format(name)) if len(v) == 0: raise ValueError("Parameter values for parameter ({0}) need " "to be a non-empty sequence.".format(name)) # XXX Remove in 0.20 class _CVScoreTuple (namedtuple('_CVScoreTuple', ('parameters', 'mean_validation_score', 'cv_validation_scores'))): # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __repr__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __repr__(self): """Simple custom repr to summarize the main info""" return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format( self.mean_validation_score, np.std(self.cv_validation_scores), self.parameters) class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator, MetaEstimatorMixin)): """Base class for hyper parameter search with cross-validation.""" @abstractmethod def __init__(self, estimator, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): self.scoring = scoring self.estimator = estimator self.n_jobs = n_jobs self.fit_params = fit_params if fit_params is not None else {} self.iid = iid self.refit = refit self.cv = cv self.verbose = verbose self.pre_dispatch = pre_dispatch self.error_score = error_score @property def _estimator_type(self): return self.estimator._estimator_type def score(self, X, y=None): """Returns the score on the given data, if the estimator has been refit. This uses the score defined by ``scoring`` where provided, and the ``best_estimator_.score`` method otherwise. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. Returns ------- score : float """ if self.scorer_ is None: raise ValueError("No score function explicitly defined, " "and the estimator doesn't provide one %s" % self.best_estimator_) return self.scorer_(self.best_estimator_, X, y) def _check_is_fitted(self, method_name): if not self.refit: raise NotFittedError(('This GridSearchCV instance was initialized ' 'with refit=False. %s is ' 'available only after refitting on the best ' 'parameters. ') % method_name) else: check_is_fitted(self, 'best_estimator_') @if_delegate_has_method(delegate='estimator') def predict(self, X): """Call predict on the estimator with the best found parameters. 
Only available if ``refit=True`` and the underlying estimator supports ``predict``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict') return self.best_estimator_.predict(X) @if_delegate_has_method(delegate='estimator') def predict_proba(self, X): """Call predict_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_proba') return self.best_estimator_.predict_proba(X) @if_delegate_has_method(delegate='estimator') def predict_log_proba(self, X): """Call predict_log_proba on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``predict_log_proba``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('predict_log_proba') return self.best_estimator_.predict_log_proba(X) @if_delegate_has_method(delegate='estimator') def decision_function(self, X): """Call decision_function on the estimator with the best found parameters. Only available if ``refit=True`` and the underlying estimator supports ``decision_function``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('decision_function') return self.best_estimator_.decision_function(X) @if_delegate_has_method(delegate='estimator') def transform(self, X): """Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports ``transform`` and ``refit=True``. Parameters ----------- X : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. """ self._check_is_fitted('transform') return self.best_estimator_.transform(X) @if_delegate_has_method(delegate='estimator') def inverse_transform(self, Xt): """Call inverse_transform on the estimator with the best found params. Only available if the underlying estimator implements ``inverse_transform`` and ``refit=True``. Parameters ----------- Xt : indexable, length n_samples Must fulfill the input assumptions of the underlying estimator. 
""" self._check_is_fitted('inverse_transform') return self.best_estimator_.transform(Xt) def _fit(self, X, y, labels, parameter_iterable): """Actual fitting, performing the search over parameters.""" estimator = self.estimator cv = check_cv(self.cv, y, classifier=is_classifier(estimator)) self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) X, y, labels = indexable(X, y, labels) n_splits = cv.get_n_splits(X, y, labels) if self.verbose > 0 and isinstance(parameter_iterable, Sized): n_candidates = len(parameter_iterable) print("Fitting {0} folds for each of {1} candidates, totalling" " {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)) base_estimator = clone(self.estimator) pre_dispatch = self.pre_dispatch out = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch )(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_, train, test, self.verbose, parameters, self.fit_params, return_parameters=True, error_score=self.error_score) for parameters in parameter_iterable for train, test in cv.split(X, y, labels)) test_scores, test_sample_counts, _, parameters = zip(*out) candidate_params = parameters[::n_splits] n_candidates = len(candidate_params) test_scores = np.array(test_scores, dtype=np.float64).reshape(n_candidates, n_splits) # NOTE test_sample counts (weights) remain the same for all candidates test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=np.int) # Computed the (weighted) mean and std for all the candidates weights = test_sample_counts if self.iid else None means = np.average(test_scores, axis=1, weights=weights) stds = np.sqrt(np.average((test_scores - means[:, np.newaxis]) ** 2, axis=1, weights=weights)) results = dict() for split_i in range(n_splits): results["test_split%d_score" % split_i] = test_scores[:, split_i] results["test_mean_score"] = means results["test_std_score"] = stds ranks = np.asarray(rankdata(-means, method='min'), dtype=np.int32) best_index = np.flatnonzero(ranks == 1)[0] best_parameters = candidate_params[best_index] results["test_rank_score"] = ranks # Use one np.MaskedArray and mask all the places where the param is not # applicable for that candidate. Use defaultdict as each candidate may # not contain all the params param_results = defaultdict(partial(np.ma.masked_all, (n_candidates,), dtype=object)) for cand_i, params in enumerate(candidate_params): for name, value in params.items(): # An all masked empty array gets created for the key # `"param_%s" % name` at the first occurence of `name`. 
# Setting the value at an index also unmasks that index param_results["param_%s" % name][cand_i] = value results.update(param_results) # Store a list of param dicts at the key 'params' results['params'] = candidate_params self.results_ = results self.best_index_ = best_index self.n_splits_ = n_splits if self.refit: # fit the best estimator using the entire dataset # clone first to work around broken estimators best_estimator = clone(base_estimator).set_params( **best_parameters) if y is not None: best_estimator.fit(X, y, **self.fit_params) else: best_estimator.fit(X, **self.fit_params) self.best_estimator_ = best_estimator return self @property def best_params_(self): check_is_fitted(self, 'results_') return self.results_['params'][self.best_index_] @property def best_score_(self): check_is_fitted(self, 'results_') return self.results_['test_mean_score'][self.best_index_] @property def grid_scores_(self): warnings.warn( "The grid_scores_ attribute was deprecated in version 0.18" " in favor of the more elaborate results_ attribute." " The grid_scores_ attribute will not be available from 0.20", DeprecationWarning) check_is_fitted(self, 'results_') grid_scores = list() for i, (params, mean, std) in enumerate(zip( self.results_['params'], self.results_['test_mean_score'], self.results_['test_std_score'])): scores = np.array(list(self.results_['test_split%d_score' % s][i] for s in range(self.n_splits_)), dtype=np.float64) grid_scores.append(_CVScoreTuple(params, mean, scores)) return grid_scores class GridSearchCV(BaseSearchCV): """Exhaustive search over specified parameter values for an estimator. Important members are fit, predict. GridSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated grid-search over a parameter grid. Read more in the :ref:`User Guide <grid_search>`. Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this GridSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Examples -------- >>> from sklearn import svm, datasets >>> from sklearn.model_selection import GridSearchCV >>> iris = datasets.load_iris() >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} >>> svr = svm.SVC() >>> clf = GridSearchCV(svr, parameters) >>> clf.fit(iris.data, iris.target) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS GridSearchCV(cv=None, error_score=..., estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=..., decision_function_shape=None, degree=..., gamma=..., kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=..., verbose=False), fit_params={}, iid=..., n_jobs=1, param_grid=..., pre_dispatch=..., refit=..., scoring=..., verbose=...) >>> sorted(clf.results_.keys()) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS ['param_C', 'param_kernel', 'params', 'test_mean_score',... 'test_rank_score', 'test_split0_score', 'test_split1_score',... 'test_split2_score', 'test_std_score'] Attributes ---------- results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. 
For instance the below given table +------------+-----------+------------+-----------------+---+---------+ |param_kernel|param_gamma|param_degree|test_split0_score|...|...rank..| +============+===========+============+=================+===+=========+ | 'poly' | -- | 2 | 0.8 |...| 2 | +------------+-----------+------------+-----------------+---+---------+ | 'poly' | -- | 3 | 0.7 |...| 4 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.1 | -- | 0.8 |...| 3 | +------------+-----------+------------+-----------------+---+---------+ | 'rbf' | 0.2 | -- | 0.9 |...| 1 | +------------+-----------+------------+-----------------+---+---------+ will be represented by a ``results_`` dict of:: { 'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'], mask = [False False False False]...) 'param_gamma': masked_array(data = [-- -- 0.1 0.2], mask = [ True True False False]...), 'param_degree': masked_array(data = [2.0 3.0 -- --], mask = [False False True True]...), 'test_split0_score' : [0.8, 0.7, 0.8, 0.9], 'test_split1_score' : [0.82, 0.5, 0.7, 0.78], 'test_mean_score' : [0.81, 0.60, 0.75, 0.82], 'test_std_score' : [0.02, 0.01, 0.03, 0.03], 'test_rank_score' : [2, 4, 3, 1], 'params' : [{'kernel': 'poly', 'degree': 2}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``results_`` arrays) which corresponds to the best candidate parameter setting. The dict at ``search.results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ------ The parameters selected are those that maximize the score of the left out data, unless an explicit score is passed in which case it is used instead. If `n_jobs` was set to a value higher than one, the data is copied for each point in the grid (and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also --------- :class:`ParameterGrid`: generates all the combinations of a hyperparameter grid. :func:`sklearn.model_selection.train_test_split`: utility function to split the data into a development set usable for fitting a GridSearchCV instance and an evaluation set for its final evaluation. :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. 
""" def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise'): super(GridSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) self.param_grid = param_grid _check_param_grid(param_grid) def fit(self, X, y=None, labels=None): """Run fit with all sets of parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. """ return self._fit(X, y, labels, ParameterGrid(self.param_grid)) class RandomizedSearchCV(BaseSearchCV): """Randomized search on hyper parameters. RandomizedSearchCV implements a "fit" and a "score" method. It also implements "predict", "predict_proba", "decision_function", "transform" and "inverse_transform" if they are implemented in the estimator used. The parameters of the estimator used to apply these methods are optimized by cross-validated search over parameter settings. In contrast to GridSearchCV, not all parameter values are tried out, but rather a fixed number of parameter settings is sampled from the specified distributions. The number of parameter settings that are tried is given by n_iter. If all parameters are presented as a list, sampling without replacement is performed. If at least one parameter is given as a distribution, sampling with replacement is used. It is highly recommended to use continuous distributions for continuous parameters. Read more in the :ref:`User Guide <randomized_parameter_search>`. Parameters ---------- estimator : estimator object. A object of that type is instantiated for each grid point. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_distributions : dict Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. n_iter : int, default=10 Number of parameter settings that are sampled. n_iter trades off runtime vs quality of the solution. scoring : string, callable or None, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. If ``None``, the ``score`` method of the estimator is used. fit_params : dict, optional Parameters to pass to the fit method. n_jobs : int, default=1 Number of jobs to run in parallel. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' iid : boolean, default=True If True, the data is assumed to be identically distributed across the folds, and the loss minimized is the total loss per sample, and not the mean loss across the folds. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. refit : boolean, default=True Refit the best estimator with the entire dataset. If "False", it is impossible to make predictions using this RandomizedSearchCV instance after fitting. verbose : integer Controls the verbosity: the higher, the more messages. random_state : int or RandomState Pseudo random number generator state used for random uniform sampling from lists of possible values instead of scipy.stats distributions. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. Attributes ---------- results_ : dict of numpy (masked) ndarrays A dict with keys as column headers and values as columns, that can be imported into a pandas ``DataFrame``. For instance the below given table +--------------+-------------+-------------------+---+---------------+ | param_kernel | param_gamma | test_split0_score |...|test_rank_score| +==============+=============+===================+===+===============+ | 'rbf' | 0.1 | 0.8 |...| 2 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.2 | 0.9 |...| 1 | +--------------+-------------+-------------------+---+---------------+ | 'rbf' | 0.3 | 0.7 |...| 1 | +--------------+-------------+-------------------+---+---------------+ will be represented by a ``results_`` dict of:: { 'param_kernel' : masked_array(data = ['rbf', rbf', 'rbf'], mask = False), 'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False), 'test_split0_score' : [0.8, 0.9, 0.7], 'test_split1_score' : [0.82, 0.5, 0.7], 'test_mean_score' : [0.81, 0.7, 0.7], 'test_std_score' : [0.02, 0.2, 0.], 'test_rank_score' : [3, 1, 1], 'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...], } NOTE that the key ``'params'`` is used to store a list of parameter settings dict for all the parameter candidates. best_estimator_ : estimator Estimator that was chosen by the search, i.e. estimator which gave highest score (or smallest loss if specified) on the left out data. Not available if refit=False. best_score_ : float Score of best_estimator on the left out data. best_params_ : dict Parameter setting that gave the best results on the hold out data. best_index_ : int The index (of the ``results_`` arrays) which corresponds to the best candidate parameter setting. 
The dict at ``search.results_['params'][search.best_index_]`` gives the parameter setting for the best model, that gives the highest mean score (``search.best_score_``). scorer_ : function Scorer function used on the held out data to choose the best parameters for the model. n_splits_ : int The number of cross-validation splits (folds/iterations). Notes ----- The parameters selected are those that maximize the score of the held-out data, according to the scoring parameter. If `n_jobs` was set to a value higher than one, the data is copied for each parameter setting(and not `n_jobs` times). This is done for efficiency reasons if individual jobs take very little time, but may raise errors if the dataset is large and not enough memory is available. A workaround in this case is to set `pre_dispatch`. Then, the memory is copied only `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 * n_jobs`. See Also -------- :class:`GridSearchCV`: Does exhaustive search over a grid of parameters. :class:`ParameterSampler`: A generator over parameter settins, constructed from param_distributions. """ def __init__(self, estimator, param_distributions, n_iter=10, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs', random_state=None, error_score='raise'): self.param_distributions = param_distributions self.n_iter = n_iter self.random_state = random_state super(RandomizedSearchCV, self).__init__( estimator=estimator, scoring=scoring, fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose, pre_dispatch=pre_dispatch, error_score=error_score) def fit(self, X, y=None, labels=None): """Run fit on the estimator with randomly drawn parameters. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_output], optional Target relative to X for classification or regression; None for unsupervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. """ sampled_params = ParameterSampler(self.param_distributions, self.n_iter, random_state=self.random_state) return self._fit(X, y, labels, sampled_params)
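As the docstrings above note, the per-candidate search results form a columnar table that drops straight into a pandas DataFrame. A small usage sketch follows; note that in this development snapshot the attribute is ``results_`` with keys like ``test_mean_score`` and ``test_rank_score``, while released scikit-learn (0.18 and later) exposes the same table as ``cv_results_`` with keys such as ``mean_test_score`` and ``rank_test_score``, so the sketch checks for both:

import pandas as pd
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV

iris = datasets.load_iris()
search = GridSearchCV(svm.SVC(), {'kernel': ('linear', 'rbf'), 'C': [1, 10]})
search.fit(iris.data, iris.target)

# The per-candidate results are columnar, so they convert directly to a DataFrame.
results = getattr(search, 'cv_results_', getattr(search, 'results_', None))
df = pd.DataFrame(results)
rank_col = 'rank_test_score' if 'rank_test_score' in df else 'test_rank_score'
print(df.sort_values(rank_col).head())
print("best params:", search.best_params_)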
bsd-3-clause
weidel-p/nest-simulator
pynest/examples/clopath_synapse_small_network.py
12
7512
# -*- coding: utf-8 -*- # # clopath_synapse_small_network.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Clopath Rule: Bidirectional connections ----------------------------------------- This script simulates a small network of ten excitatory and three inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected and driven by 500 Poisson generators. The synapses from the Poisson generators to the excitatory population and those among the neurons of the network are Clopath synapses. The rate of the Poisson generators is modulated with a Gaussian profile whose center shifts randomly each 100 ms between ten equally spaced positions. This setup demonstrates that the Clopath synapse is able to establish bidirectional connections. The example is adapted from [1]_ (cf. fig. 5). References ~~~~~~~~~~~ .. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding: a model of voltage-based STDP with homeostasis. Nature Neuroscience 13:3, 344--352 """ import nest import numpy as np import matplotlib.pyplot as plt import random ############################################################################## # Set the parameters simulation_time = 1.0e4 resolution = 0.1 delay = resolution # Poisson_generator parameters pg_A = 30. # amplitude of Gaussian pg_sigma = 10. 
# std deviation nest.ResetKernel() nest.SetKernelStatus({'resolution': resolution}) # Create neurons and devices nrn_model = 'aeif_psc_delta_clopath' nrn_params = {'V_m': -30.6, 'g_L': 30.0, 'w': 0.0, 'tau_plus': 7.0, 'tau_minus': 10.0, 'tau_w': 144.0, 'a': 4.0, 'C_m': 281.0, 'Delta_T': 2.0, 'V_peak': 20.0, 't_clamp': 2.0, 'A_LTP': 8.0e-6, 'A_LTD': 14.0e-6, 'A_LTD_const': False, 'b': 0.0805, 'u_ref_squared': 60.0**2} pop_exc = nest.Create(nrn_model, 10, nrn_params) pop_inh = nest.Create(nrn_model, 3, nrn_params) ############################################################################## # We need parrot neurons since Poisson generators can only be connected # with static connections pop_input = nest.Create('parrot_neuron', 500) # helper neurons pg = nest.Create('poisson_generator', 500) wr = nest.Create('weight_recorder', 1) ############################################################################## # First connect Poisson generators to helper neurons nest.Connect(pg, pop_input, 'one_to_one', {'synapse_model': 'static_synapse', 'weight': 1.0, 'delay': delay}) ############################################################################## # Create all the connections nest.CopyModel('clopath_synapse', 'clopath_input_to_exc', {'Wmax': 3.0}) conn_dict_input_to_exc = {'rule': 'all_to_all'} syn_dict_input_to_exc = {'synapse_model': 'clopath_input_to_exc', 'weight': nest.random.uniform(0.5, 2.0), 'delay': delay} nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc, syn_dict_input_to_exc) # Create input->inh connections conn_dict_input_to_inh = {'rule': 'all_to_all'} syn_dict_input_to_inh = {'synapse_model': 'static_synapse', 'weight': nest.random.uniform(0.0, 0.5), 'delay': delay} nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh) # Create exc->exc connections nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc', {'Wmax': 0.75, 'weight_recorder': wr[0]}) syn_dict_exc_to_exc = {'synapse_model': 'clopath_exc_to_exc', 'weight': 0.25, 'delay': delay} conn_dict_exc_to_exc = {'rule': 'all_to_all', 'allow_autapses': False} nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc) # Create exc->inh connections syn_dict_exc_to_inh = {'synapse_model': 'static_synapse', 'weight': 1.0, 'delay': delay} conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8} nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh) # Create inh->exc connections syn_dict_inh_to_exc = {'synapse_model': 'static_synapse', 'weight': 1.0, 'delay': delay} conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6} nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc) ############################################################################## # Randomize the initial membrane potential pop_exc.V_m = nest.random.normal(-60., 25.) pop_inh.V_m = nest.random.normal(-60., 25.) ############################################################################## # Simulation divided into intervals of 100ms for shifting the Gaussian sim_interval = 100. 
for i in range(int(simulation_time / sim_interval)):
    # set rates of poisson generators
    rates = np.empty(500)
    # pg_mu will be randomly chosen out of 25,75,125,...,425,475
    pg_mu = 25 + random.randint(0, 9) * 50
    for j in range(500):
        rates[j] = pg_A * np.exp((-1 * (j - pg_mu)**2) / (2 * pg_sigma**2))
        pg[j].rate = rates[j] * 1.75
    nest.Simulate(sim_interval)

##############################################################################
# Plot results

fig1, axA = plt.subplots(1, sharex=False)

# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(exc_conns.source)
exc_conns_targets = np.array(exc_conns.target)
exc_conns_weights = np.array(exc_conns.weight)
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10 - 1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10 - 1))

# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
    idx_array = np.argsort(trgs)
    weights[i] = ws[idx_array]

weight_matrix = np.zeros((10, 10))
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]

# Difference between initial and final value
init_w_matrix = np.ones((10, 10)) * 0.25
init_w_matrix -= np.identity(10) * 0.25

caxA = axA.imshow(weight_matrix - init_w_matrix)
cbarB = fig1.colorbar(caxA, ax=axA)
axA.set_xticks([0, 2, 4, 6, 8])
axA.set_xticklabels(['1', '3', '5', '7', '9'])
axA.set_yticks([0, 2, 4, 6, 8])
axA.set_yticklabels(['1', '3', '5', '7', '9'])
axA.set_xlabel("to neuron")
axA.set_ylabel("from neuron")
axA.set_title("Change of synaptic weights before and after simulation")
plt.show()
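The weight recorder ``wr`` attached to the exc->exc Clopath synapses above is created but never read out. As a follow-up to the script (not part of the original example), the recorded weight trajectories could be plotted roughly like this, assuming the standard NEST 3 ``weight_recorder`` event fields ``senders``, ``targets``, ``times`` and ``weights``:

# Continues the script above: inspect how individual exc->exc weights evolved.
events = wr.get('events')
senders = np.array(events['senders'])
targets = np.array(events['targets'])
times = np.array(events['times'])
rec_weights = np.array(events['weights'])

fig2, axB = plt.subplots(1)
# Plot only a handful of connections to keep the figure readable.
for s, t in sorted(set(zip(senders, targets)))[:5]:
    mask = (senders == s) & (targets == t)
    axB.plot(times[mask], rec_weights[mask], label="%d -> %d" % (s, t))
axB.set_xlabel("time (ms)")
axB.set_ylabel("synaptic weight")
axB.legend(loc="best")
plt.show()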
gpl-2.0
architecture-building-systems/CityEnergyAnalyst
cea/optimization/optimization_main.py
2
7207
""" multi-objective optimization of supply systems for the CEA """ import os import time import warnings import pandas as pd import cea.config import cea.inputlocator from cea.optimization.master import master_main from cea.optimization.preprocessing.preprocessing_main import get_building_names_with_load from cea.optimization.preprocessing.preprocessing_main import preproccessing from .constants import DH_ACRONYM, DC_ACRONYM warnings.filterwarnings("ignore") __author__ = "Jimeno A. Fonseca" __copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich" __credits__ = ["Thuy-an Ngugen", "Jimeno A. Fonseca", "Sreepathi Bhargava Krishna"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "Daren Thomas" __email__ = "[email protected]" __status__ = "Production" # optimization def moo_optimization(locator, weather_file, config): ''' This function optimizes the conversion, storage and distribution systems of a heating distribution for the case study. It requires that the energy demand, technology potential and thermal networks are simulated, as follows: - energy demand simulation: run cea/demand/demand_main.py - PV potential: run cea/technologies/solar/photovoltaic.py - PVT potential: run cea/technologies/solar/photovoltaic_thermal.py - flat plate solar collector potential: run cea/technologies/solar/solar_collector.py with config.solar.type_scpanel = 'FP' - evacuated tube solar collector potential: run cea/technologies/solar/solar_collector.py with config.solar.type_scpanel = 'ET' - waste water heat recovery: run cea/resources/sewage_heat_exchanger.py - lake water potential: run cea/resources/water_body_potential.py - thermal network simulation: run cea/technologies/thermal_network/thermal_network.py if no network is currently present in the case study, consider running network_layout/main.py first - decentralized building simulation: run cea/optimization/preprocessing/decentralized_building_main.py :param locator: path to input locator :param weather_file: path to weather file :type locator: cea.inputlocator.InputLocator :type weather_file: string :returns: None :rtype: Nonetype ''' t0 = time.perf_counter() # read total demand file and names and number of all buildings total_demand = pd.read_csv(locator.get_total_demand()) building_names_all = list(total_demand.Name.values) # needs to be a list to avoid errors # local flags if config.optimization.network_type == DH_ACRONYM: district_heating_network = True district_cooling_network = False elif config.optimization.network_type == DC_ACRONYM: district_heating_network = False district_cooling_network = True else: raise Exception("no valid values for 'network-type' input parameter") # GET NAMES_OF BUILDINGS THAT HAVE HEATING, COOLING AND ELECTRICITY LOAD SEPARATELY buildings_heating_demand = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr') buildings_cooling_demand = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr') buildings_electricity_demand = get_building_names_with_load(total_demand, load_name='E_sys_MWhyr') # pre-process information regarding resources and technologies (they are treated before the optimization) # optimize best systems for every individual building (they will compete against a district distribution solution) print("PRE-PROCESSING") weather_features, network_features, prices, lca = preproccessing(locator, total_demand, buildings_heating_demand, buildings_cooling_demand, weather_file, district_heating_network, district_cooling_network) # optimize conversion systems 
print("SUPPLY SYSTEMS OPTIMIZATION") master_main.non_dominated_sorting_genetic_algorithm(locator, building_names_all, district_heating_network, district_cooling_network, buildings_heating_demand, buildings_cooling_demand, buildings_electricity_demand, network_features, weather_features, config, prices, lca) t1 = time.perf_counter() print('Centralized Optimization succeeded after %s seconds' % (t1 - t0)) # ============================ # test # ============================ def main(config): """ run the whole optimization routine """ locator = cea.inputlocator.InputLocator(scenario=config.scenario) weather_file = locator.get_weather_file() try: check_input_files(config, locator) except ValueError as err: import sys print(err.message) sys.exit(1) moo_optimization(locator=locator, weather_file=weather_file, config=config) def check_input_files(config, locator): """ Raise a ``ValueError`` if any of the required input files are missing. :param cea.config.Configuration config: The config object to use :param cea.config.InputLocator locator: The input locator to use :return: None """ network_type = config.optimization.network_type if not demand_files_exist(locator): raise ValueError("Missing demand data of the scenario. Consider running demand script first.") if not os.path.exists(locator.SC_totals(panel_type='FP')): raise ValueError( "Missing SC potential of panel type 'FP' of the scenario. Consider running solar-collector script first with panel_type as FP and t-in-SC as 75") if not os.path.exists(locator.SC_totals(panel_type='ET')): raise ValueError( "Missing SC potential of panel type 'ET' of the scenario. Consider running solar-collector script first with panel_type as ET and t-in-SC as 150") if not os.path.exists(locator.get_thermal_network_edge_list_file(network_type, '')): raise ValueError( "Missing thermal network simulation results. Consider running thermal network simulation script first.") def demand_files_exist(locator): """verify that the necessary demand files exist""" return all(os.path.exists(locator.get_demand_results_file(building_name)) for building_name in locator.get_zone_building_names()) if __name__ == '__main__': main(cea.config.Configuration())
mit