repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values)
---|---|---|---|---|---
abimannans/scikit-learn
|
examples/cluster/plot_affinity_propagation.py
|
349
|
2304
|
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
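# 'preference' sets how strongly each point is favoured as an exemplar;
# more negative values produce fewer clusters (chosen here so the three
# generated blobs are recovered).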
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
NICTA/dora
|
demos/linsampler_demo.py
|
1
|
2834
|
"""Demo for lin_sampler.py"""
import logging
import numpy as np
import matplotlib.pyplot as pl
import dora.active_sampling as sampling
from dora.active_sampling import pltutils
def ground_truth(X):
"""Compute the ground truth."""
return np.sin(X-5)+np.sin(X/2-2)+0.4*np.sin(X/5-2)+0.4*np.sin(X-3)+0.2*np.sin(X/0.3-3)
def main():
#Set up the problem
x = np.arange(0, 30, 0.1)
#Set up the bayesian linear sampler
lower = [0]
upper = [30]
n_train = 15
feature = 20
#Define the basis function
basisdef = 'radial'
mu = np.linspace(0, 30, feature)[:, np.newaxis]
s = 1.5
basisparams = [mu, s]
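# Assumed interpretation: mu holds the radial-basis centres spread over the
# input domain [0, 30] and s is the basis width (length scale).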
#Define the acquisition function
acq_name = 'pred_upper_bound'
explore_priority = 1.
#Construct the sampler
sampler = sampling.BayesianLinear(lower, upper, basisdef=basisdef,
basisparams=basisparams, feature=feature,
acq_name=acq_name, n_train=n_train, seed=11)
#Sample the training data for the sampler
for i in range(n_train):
xq, uid = sampler.pick()
yq_true = ground_truth(xq)
sampler.update(uid, yq_true)
#Enough training data, train the sampler
xq, uid = sampler.pick()
#Query
for i in range(8):
yq_true = ground_truth(xq)
sampler.update(uid, yq_true)
xquery = x[:, np.newaxis]
f_mean, f_std = sampler.predict(xquery)
#Visualize the prediction
pl.figure(figsize=(15,5))
pl.subplot(2,1,1)
pl.plot(x, ground_truth(x), 'k')
pl.plot(sampler.X[:,-1], sampler.y[:,-1], 'go', markersize=10)
pl.plot(sampler.X[-1], sampler.y[-1], 'ro', markersize=10)
pl.plot(xquery, f_mean, 'b--')
lower = f_mean - f_std*2
upper = f_mean + f_std*2
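# Mean +/- 2 standard deviations gives an approximate 95% credible band
# under the Gaussian posterior.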
pl.fill_between(xquery[:,0], upper[:,0], lower[:,0], facecolor='lightblue')
pl.xlabel('x')
pl.ylabel('f(x)')
pl.legend(('Ground truth', 'Observations',
'Most recent observation', 'Predicted mean', 'Predicted +/- 2 standard deviations'))
pl.title('Observations after update')
#Visualize the acquisition function
acq_value, acq_max_ind = sampler.eval_acq(x)
pl.subplot(2,1,2)
pl.plot(x, ground_truth(x), 'k')
pl.plot(sampler.X, sampler.y, 'go', markersize=10)
pl.plot(x, acq_value, 'r--')
pl.plot(x[acq_max_ind], acq_value[acq_max_ind], 'rD', markersize=10)
pl.xlabel('x')
pl.ylabel('f(x)')
pl.title('The new acquisition function after update')
pl.legend(('Ground truth', 'Observations', 'Acquisition function', 'Acquisition function max'))
pl.show()
#Pick the next new query point
xq, uid = sampler.pick()
if __name__ == "__main__":
main()
|
apache-2.0
|
rahuldhote/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling integers without replacement.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that each sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
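# For example (illustrative): sample(1000, 10) should return 10 distinct
# integers drawn from range(1000).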
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
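# Note: the permutation-based approach shuffles the whole population, so its
# cost stays O(n_population) even when only a few samples are requested.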
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_ridge.py
|
2
|
11500
|
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_array_almost_equal, \
assert_equal, assert_array_equal
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.cross_validation import KFold
rng = np.random.RandomState(0)
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
alpha = 1.0
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.5)
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.5)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_shapes():
"""Test shape of coef_ and intercept_
"""
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
"""Test intercept with multiple targets GH issue #708
"""
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def _test_ridge_loo(filter_):
# test that it works with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
ridge_gcv3.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_class_weights():
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_class_weights_cv():
"""
Test class weights for cross validated ridge classifier.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
"""
Test _RidgeCV's store_cv_values attribute.
"""
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
|
agpl-3.0
|
codeaudit/augur-core
|
tests/consensus/timing/optimize.py
|
4
|
8881
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This software (Augur) allows buying and selling event options in Ethereum.
Copyright (c) 2014 Chris Calderon, Joey Krug, Scott Leonard, Alan Lu, Jack Peterson
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Questions? Please contact [email protected] or [email protected].
"""
from __future__ import division
from pprint import pprint
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
try:
from colorama import Fore, Style, init
except ImportError:
pass
from pyethereum import tester as t
from pyconsensus import Oracle
pd.set_option("display.max_rows", 25)
pd.set_option("display.width", 1000)
pd.options.display.mpl_style = "default"
np.set_printoptions(linewidth=500)
# precision=5,
# suppress=True,
# formatter={"float": "{: 0.3f}".format})
tolerance = 1e-12
init()
if matplotlib.is_interactive():
plt.ioff()
def BR(string): # bright red
return "\033[1;31m" + str(string) + "\033[0m"
def BB(string): # bright blue
return Fore.BLUE + Style.BRIGHT + str(string) + Style.RESET_ALL
def BG(string): # bright green
return Fore.GREEN + Style.BRIGHT + str(string) + Style.RESET_ALL
def blocky(*strings, **kwds):
colored = kwds.get("colored", True)
width = kwds.get("width", 108)
bound = width*"#"
fmt = "#{:^%d}#" % (width - 2)
lines = [bound]
for string in strings:
lines.append(fmt.format(string))
lines.append(bound)
lines = "\n".join(lines)
if colored:
lines = BR(lines)
return lines
def fix(x):
return int(x * 0x10000000000000000)
def unfix(x):
return x / 0x10000000000000000
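# fix()/unfix() scale by 2**64 (0x10000000000000000), the fixed-point
# encoding apparently used for values passed to and returned from the contract.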
def consensus(reports, reputation, max_iterations=10):
s = t.state()
filename = "consensus.se"
c = s.contract(filename)
num_voters = len(reputation)
num_events = len(reports[0])
v_size = len(reports.flatten())
reputation_fixed = map(fix, reputation)
reports_fixed = map(fix, reports.flatten())
# tx 1: consensus()
result = s.send(t.k0, c, 0,
funid=0,
abi=[reports_fixed, reputation_fixed, max_iterations])
result = np.array(result)
weighted_centered_data = result[0:v_size]
votes_filled = result[v_size:(2*v_size)]
votes_mask = result[(2*v_size):(3*v_size)]
# pca()
s = t.state()
c = s.contract(filename)
scores = s.send(t.k0, c, 0,
funid=1,
abi=[weighted_centered_data.tolist(),
reputation_fixed,
num_voters,
num_events,
max_iterations])
# consensus2()
s = t.state()
c = s.contract(filename)
retval = s.send(t.k0, c, 0,
funid=2,
abi=[reputation_fixed,
scores,
votes_filled.tolist(),
votes_mask.tolist(),
num_voters,
num_events])
outcome_final = retval[0:num_events]
author_bonus = retval[num_events:(2*num_events)]
voter_bonus = retval[(2*num_events):-2]
return outcome_final, author_bonus, voter_bonus, retval[-2] - retval[-1]
def profile(contract):
filename = contract + ".se"
print BB("Contract:"), BG(filename)
MAX_SIZE = 25
np.random.seed(0)
print BR("Events fixed, varying reporters")
# s = t.state()
# c = s.contract(filename)
reporters_gas_used = []
sizes = range(2, MAX_SIZE+1)
reporters_sizes_used = []
reporters_errors = []
num_events = 4
for k in sizes:
print(str(k) + 'x' + str(num_events))
reports = np.random.randint(-1, 2, (k, num_events))
reputation = np.random.randint(1, 100, k)
try:
# gas_used = s.send(t.k0, c, 0,
# funid=0,
# abi=[map(fix, reports.flatten()),
# map(fix, reputation),
# 5])
gas_used = consensus(reports, reputation, max_iterations=5)[-1]
reporters_gas_used.append(gas_used)
reporters_sizes_used.append(k)
except Exception as exc:
print(exc)
break
print BR("Reporters fixed, varying events")
# s = t.state()
# c = s.contract(filename)
sizes = range(2, MAX_SIZE+1)
events_gas_used = []
events_sizes_used = []
num_reporters = 4
for k in sizes:
print(str(num_reporters) + 'x' + str(k))
reports = np.random.randint(-1, 2, (num_reporters, k))
reputation = np.random.randint(1, 100, num_reporters)
try:
# gas_used = s.send(t.k0, c, 0,
# funid=0,
# abi=[map(fix, reports.flatten()),
# map(fix, reputation),
# 5])
gas_used = consensus(reports, reputation, max_iterations=5)[-1]
events_gas_used.append(gas_used)
events_sizes_used.append(k)
except Exception as exc:
print(exc)
break
print BR("Reporters and events fixed, varying PCA iterations")
num_reporters = 6
num_events = 4
pca_iter_sizes = range(1, 31)
voter_bonus_rmsd = []
author_bonus_rmsd = []
outcome_rmsd = []
pca_iter_sizes_used = []
for k in pca_iter_sizes:
# s = t.state()
# c = s.contract(filename)
print k, "iterations"
reports = np.random.randint(-1, 2, (num_reporters, num_events))
reputation = np.random.randint(1, 100, num_reporters)
try:
# retval = np.array(map(unfix, s.send(t.k0, c, 0, funid=0, abi=[map(fix, reports.flatten()), map(fix, reputation), k])))
outcome_final, author_bonus, voter_bonus, _ = consensus(reports, reputation, max_iterations=k)  # consensus() also returns gas used
# compare to pyconsensus results
outcome = Oracle(votes=reports, reputation=reputation).consensus()
outcome_rmsd.append(np.mean((outcome_final - np.array(outcome["events"]["outcome_final"]))**2))
voter_bonus_rmsd.append(np.mean((voter_bonus - np.array(outcome["agents"]["voter_bonus"]))**2))
author_bonus_rmsd.append(np.mean((author_bonus - np.array(outcome["events"]["author_bonus"]))**2))
pca_iter_sizes_used.append(k)
except Exception as exc:
print(exc)
break
plt.figure()
plt.subplot(411)
plt.plot(reporters_sizes_used, reporters_gas_used, 'o-', linewidth=1.5, color="steelblue")
plt.axis([1, np.max(reporters_sizes_used)+1, np.min(reporters_gas_used)/1.1, np.max(reporters_gas_used)*1.1])
plt.xlabel("# reporters (" + str(num_events) + " events)")
plt.ylabel("gas used")
plt.grid(True)
plt.subplot(412)
plt.plot(events_sizes_used, events_gas_used, 'o-', linewidth=1.5, color="steelblue")
plt.axis([1, np.max(events_sizes_used)+1, np.min(events_gas_used)/1.1, np.max(events_gas_used)*1.1])
plt.xlabel("# events (" + str(num_reporters) + " reporters)")
plt.ylabel("gas used")
plt.grid(True)
plt.subplot(421)
plt.plot(pca_iter_sizes_used, voter_bonus_rmsd, 'o-', linewidth=1.5, color="steelblue")
plt.plot(pca_iter_sizes_used, author_bonus_rmsd, 'o-', linewidth=1.5, color="red")
# plt.axis([1, np.max(events_sizes_used)+1, np.min(events_gas_used)/1.1, np.max(events_gas_used)*1.1])
plt.title("red: author bonus (cash/bitcoin), blue: voter bonus (reputation)")
plt.xlabel("# pca iterations")
plt.ylabel("RMSD")
plt.grid(True)
plt.subplot(422)
plt.plot(pca_iter_sizes_used, outcome_rmsd, 'o-', linewidth=1.5, color="steelblue")
plt.title("outcome")
plt.xlabel("# pca iterations")
plt.ylabel("RMSD")
plt.grid(True)
plt.savefig("parameters.png")
plt.show()
def main():
profile("consensus")
if __name__ == "__main__":
main()
|
gpl-3.0
|
synthicity/urbansim
|
urbansim/urbanchoice/tests/test_mnl.py
|
3
|
7041
|
"""
Test data and results for this are generated
by the R script at data/mnl_tests.R.
"""
from __future__ import division
import os.path
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from patsy import dmatrix
from .. import mnl
@pytest.fixture
def num_alts():
return 4
@pytest.fixture(scope='module', params=[
('fish.csv',
'fish_choosers.csv',
'price + catch - 1',
'mode',
pd.Series([-0.02047652, 0.95309824], index=['price', 'catch']),
pd.DataFrame([
[0.2849598, 0.2742482, 0.1605457, 0.2802463],
[0.1498991, 0.4542377, 0.2600969, 0.1357664]],
columns=['beach', 'boat', 'charter', 'pier'])),
('fish.csv',
'fish_choosers.csv',
'price:income + catch:income + catch * price - 1',
'mode',
pd.Series([
9.839876e-01, -2.659466e-02, 6.933946e-07, -1.324231e-04,
7.646750e-03],
index=[
'catch', 'price', 'price:income', 'catch:income', 'catch:price']),
pd.DataFrame([
[0.2885868, 0.2799776, 0.1466286, 0.2848070],
[0.1346205, 0.4855238, 0.2593983, 0.1204575]],
columns=['beach', 'boat', 'charter', 'pier'])),
('travel_mode.csv',
'travel_choosers.csv',
'wait + travel + vcost - 1',
'choice',
pd.Series([
-0.033976668, -0.002192951, 0.008890669],
index=['wait', 'travel', 'vcost']),
pd.DataFrame([
[0.2776876, 0.1584818, 0.1049530, 0.4588777],
[0.1154490, 0.1653297, 0.1372684, 0.5819528]],
columns=['air', 'train', 'bus', 'car'])),
('travel_mode.csv',
'travel_choosers.csv',
'wait + travel + income:vcost + income:gcost - 1',
'choice',
pd.Series([
-3.307586e-02, -2.518762e-03, 1.601746e-04, 3.745822e-05],
index=['wait', 'travel', 'income:vcost', 'income:gcost']),
pd.DataFrame([
[0.2862046, 0.1439074, 0.1044490, 0.4654390],
[0.1098313, 0.1597317, 0.1344395, 0.5959975]],
columns=['air', 'train', 'bus', 'car']))])
def test_data(request):
data, choosers, form, col, est_expected, sim_expected = request.param
return {
'data': data,
'choosers': choosers,
'formula': form,
'column': col,
'est_expected': est_expected,
'sim_expected': sim_expected
}
@pytest.fixture
def df(test_data):
filen = os.path.join(os.path.dirname(__file__), 'data', test_data['data'])
return pd.read_csv(filen)
@pytest.fixture
def choosers(test_data):
filen = os.path.join(
os.path.dirname(__file__), 'data', test_data['choosers'])
return pd.read_csv(filen)
@pytest.fixture
def chosen(df, num_alts, test_data):
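# each chooser occupies num_alts consecutive rows, so reshape the chosen
# indicator column into an (n_choosers, num_alts) array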
return df[test_data['column']].values.astype('int').reshape(
(int(len(df) / num_alts), num_alts))
@pytest.fixture
def dm(df, test_data):
return dmatrix(test_data['formula'], data=df, return_type='dataframe')
@pytest.fixture
def choosers_dm(choosers, test_data):
return dmatrix(
test_data['formula'], data=choosers, return_type='dataframe')
@pytest.fixture
def fit_coeffs(dm, chosen, num_alts):
log_like, fit = mnl.mnl_estimate(dm.values, chosen, num_alts)
return fit.Coefficient.values
def test_mnl_estimate(dm, chosen, num_alts, test_data):
log_like, fit = mnl.mnl_estimate(dm.values, chosen, num_alts)
result = pd.Series(fit.Coefficient.values, index=dm.columns)
result, expected = result.align(test_data['est_expected'])
npt.assert_allclose(result.values, expected.values, rtol=1e-4)
def test_mnl_simulate(dm, fit_coeffs, num_alts, test_data, choosers_dm):
# check that if all the alternatives have the same numbers
# we get an even probability distribution
data = np.array(
[[10 ** (x + 1) for x in range(len(dm.columns))]] * num_alts)
probs = mnl.mnl_simulate(
data, fit_coeffs, num_alts, returnprobs=True)
npt.assert_allclose(probs, [[1 / num_alts] * num_alts])
# now test with real data
probs = mnl.mnl_simulate(
choosers_dm.values, fit_coeffs, num_alts, returnprobs=True)
results = pd.DataFrame(probs, columns=test_data['sim_expected'].columns)
results, expected = results.align(test_data['sim_expected'])
npt.assert_allclose(results.values, expected.values, rtol=1e-4)
def test_alternative_specific_coeffs(num_alts):
template = np.array(
[[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
fish = df({'data': 'fish.csv'})
fish_choosers = choosers({'choosers': 'fish_choosers.csv'})
fish_chosen = chosen(fish, num_alts, {'column': 'mode'})
# construct design matrix with columns repeated for 3 / 4 of alts
num_choosers = len(fish['chid'].unique())
intercept_df = pd.DataFrame(
np.tile(template, (num_choosers, 1)),
columns=[
'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)'])
income_df = pd.DataFrame(
np.tile(template, (num_choosers, 1)),
columns=[
'boat:income', 'charter:income', 'pier:income'])
for idx, row in fish.iterrows():
income_df.loc[idx] = income_df.loc[idx] * row['income']
dm = pd.concat([intercept_df, income_df], axis=1)
# construct choosers design matrix
num_choosers = len(fish_choosers['chid'].unique())
intercept_df = pd.DataFrame(
np.tile(template, (num_choosers, 1)),
columns=[
'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)'])
income_df = pd.DataFrame(
np.tile(template, (num_choosers, 1)),
columns=[
'boat:income', 'charter:income', 'pier:income'])
for idx, row in fish_choosers.iterrows():
income_df.loc[idx] = income_df.loc[idx] * row['income']
choosers_dm = pd.concat([intercept_df, income_df], axis=1)
# test estimation
expected = pd.Series([
7.389208e-01, 1.341291e+00, 8.141503e-01, 9.190636e-05,
-3.163988e-05, -1.434029e-04],
index=[
'boat:(intercept)', 'charter:(intercept)', 'pier:(intercept)',
'boat:income', 'charter:income', 'pier:income'])
log_like, fit = mnl.mnl_estimate(dm.values, fish_chosen, num_alts)
result = pd.Series(fit.Coefficient.values, index=dm.columns)
result, expected = result.align(expected)
npt.assert_allclose(result.values, expected.values, rtol=1e-4)
# test simulation
expected = pd.DataFrame([
[0.1137676, 0.2884583, 0.4072931, 0.190481],
[0.1153440, 0.3408657, 0.3917253, 0.152065]],
columns=['beach', 'boat', 'charter', 'pier'])
fit_coeffs = fit.Coefficient.values
probs = mnl.mnl_simulate(
choosers_dm.values, fit_coeffs, num_alts, returnprobs=True)
results = pd.DataFrame(probs, columns=expected.columns)
results, expected = results.align(expected)
npt.assert_allclose(results.values, expected.values, rtol=1e-4)
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
sklearn/tests/test_isotonic.py
|
230
|
11087
|
import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
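# the two observations tied at x=1 (y=1 and y=2) are pooled to their mean, 1.5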
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
|
bsd-3-clause
|
phdowling/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
276
|
3790
|
# Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
Reagankm/KnockKnock
|
venv/lib/python3.4/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py
|
23
|
5574
|
#===========================================================================
#
# UnitDblConverter
#
#===========================================================================
"""UnitDblConverter module containing class UnitDblConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
import matplotlib.projections.polar as polar
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblConverter' ]
#===========================================================================
# A special function for use with the matplotlib FuncFormatter class
# for formatting axes with radian units.
# This was copied from matplotlib example code.
def rad_fn(x, pos = None ):
"""Radian function formatter."""
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return str(x)
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
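# Illustrative usage (not part of this module): pair rad_fn with a
# FuncFormatter so radian ticks render as TeX fractions of pi, e.g.
#   ax.xaxis.set_major_formatter(ticker.FuncFormatter(rad_fn))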
#===========================================================================
class UnitDblConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for the Monte UnitDbl class.
"""
# default for plotting
defaults = {
"distance" : 'km',
"angle" : 'deg',
"time" : 'sec',
}
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has Epoch data.
= INPUT VARIABLES
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Check to see if the value used for units is a string unit value
# or an actual instance of a UnitDbl so that we can use the unit
# value for the default axis label value.
if ( unit ):
if ( isinstance( unit, six.string_types ) ):
label = unit
else:
label = unit.label()
else:
label = None
if ( label == "deg" ) and isinstance( axis.axes, polar.PolarAxes ):
# If we want degrees for a polar plot, use the PolarPlotFormatter
majfmt = polar.PolarAxes.ThetaFormatter()
else:
majfmt = U.UnitDblFormatter( useOffset = False )
return units.AxisInfo( majfmt = majfmt, label = label )
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
isNotUnitDbl = True
if ( iterable(value) and not isinstance(value, six.string_types) ):
if ( len(value) == 0 ):
return []
else:
return [ UnitDblConverter.convert( x, unit, axis ) for x in value ]
# We need to check to see if the incoming value is actually a UnitDbl and
# set a flag. If we get an empty list, then just return an empty list.
if ( isinstance(value, U.UnitDbl) ):
isNotUnitDbl = False
# If the incoming value behaves like a number, but is not a UnitDbl,
# then just return it because we don't know how to convert it
# (or it is already converted)
if ( isNotUnitDbl and units.ConversionInterface.is_numlike( value ) ):
return value
# If no units were specified, then get the default units to use.
if ( unit == None ):
unit = UnitDblConverter.default_units( value, axis )
# Convert the incoming UnitDbl value/values to float/floats
if isinstance( axis.axes, polar.PolarAxes ) and (value.type() == "angle"):
# Guarantee that units are radians for polar plots.
return value.convert( "rad" )
return value.convert( unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# Determine the default units based on the user preferences set for
# default units when printing a UnitDbl.
if ( iterable(value) and not isinstance(value, six.string_types) ):
return UnitDblConverter.default_units( value[0], axis )
else:
return UnitDblConverter.defaults[ value.type() ]
|
gpl-2.0
|
NorthArrowResearch/pyGISExperiments
|
SFRWeek/tutorial2.py
|
2
|
1189
|
import ogr
import json
from shapely.geometry import *
import matplotlib.pyplot as plt
import numpy as np
"""
Tutorial 2: Open a shapefile, read its features, offset the thalweg line, and plot the result.
"""
sFilename = "inputs/Thalweg.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(sFilename, 0)
layer = dataSource.GetLayer()
spatialRef = layer.GetSpatialRef()
rawfeatures = [feat for feat in layer]
features = []
for feat in rawfeatures:
featobj = json.loads(feat.ExportToJson())
fields = {}
# copy each attribute field named in the GeoJSON properties onto the feature dict
for f in featobj['properties']:
fields[f] = feat.GetField(f)
features.append({
'geometry': shape(featobj['geometry']),
'fields': fields
})
thalweg = features[0]['geometry']
thalweg2arr = np.array(thalweg.coords)
thalweg2arr[:,1] += 10
newthalweg = LineString(thalweg2arr)
# Some Plotting
fig = plt.figure(1, figsize=(10, 10))
ax = fig.gca()
ax.plot(*thalweg.xy, color='#FF0000', alpha=1, markersize=5, marker="o", zorder=10, label="Old Thalweg")
ax.plot(*newthalweg.xy, color='#00FF00', alpha=1, markersize=10, marker="o", zorder=10, label="New Thalweg")
plt.autoscale(enable=False)
plt.legend(loc='best')
plt.show()
plt.clf()
plt.close()
print "DONE"
|
gpl-2.0
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/model/forecast/test_prophet_forecaster.py
|
1
|
5121
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import tempfile
import os
from zoo.chronos.model.forecast.prophet_forecaster import ProphetForecaster
from unittest import TestCase
import pytest
def create_data():
seq_len = 400
data = pd.DataFrame(pd.date_range('20130101', periods=seq_len), columns=['ds'])
data.insert(1, 'y', np.random.rand(seq_len))
horizon = np.random.randint(2, 50)
validation_data = pd.DataFrame(pd.date_range('20140426', periods=horizon), columns=['ds'])
validation_data.insert(1, 'y', np.random.rand(horizon))
return data, validation_data
class TestChronosModelProphetForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_prophet_forecaster_fit_eval_pred(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, validation_data)
test_pred = forecaster.predict(validation_data.shape[0])
assert test_pred.shape[0] == validation_data.shape[0]
test_mse = forecaster.evaluate(validation_data)
def test_prophet_forecaster_save_restore(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, validation_data)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "json")
test_pred_save = forecaster.predict(validation_data.shape[0])
forecaster.save(ckpt_name)
forecaster.restore(ckpt_name)
test_pred_restore = forecaster.predict(validation_data.shape[0])
assert (test_pred_save['yhat'] == test_pred_restore['yhat']).all()
def test_prophet_forecaster_runtime_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(Exception,
match="You must call fit or restore first before calling predict!"):
forecaster.predict(horizon=validation_data.shape[0])
with pytest.raises(Exception,
match="Input invalid validation_data of None"):
forecaster.evaluate(validation_data=None)
with pytest.raises(Exception,
match="You must call fit or restore first before calling evaluate!"):
forecaster.evaluate(validation_data=validation_data)
with pytest.raises(Exception,
match="You must call fit or restore first before calling save!"):
model_file = "tmp.json"
forecaster.save(model_file)
def test_prophet_forecaster_shape_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(AssertionError):
forecaster.fit(data[['ds']], validation_data)
with pytest.raises(AssertionError):
forecaster.fit(data, validation_data[['ds']])
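# --- Editor's addition: hedged usage sketch, not part of the original test suite. ---
# A minimal, standalone illustration of the ProphetForecaster workflow exercised by the
# tests above. It reuses the create_data() helper and only the constructor arguments and
# fit/predict/evaluate calls already shown in this file; nothing else is assumed.
def _example_prophet_workflow():
    data, validation_data = create_data()
    forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
                                   seasonality_mode='additive',
                                   metric="mse")
    forecaster.fit(data, validation_data)
    pred = forecaster.predict(horizon=validation_data.shape[0])
    mse = forecaster.evaluate(validation_data)
    return pred, mse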
|
apache-2.0
|
drammock/mne-python
|
mne/decoding/time_delaying_ridge.py
|
6
|
13451
|
# -*- coding: utf-8 -*-
"""TimeDelayingRidge class."""
# Authors: Eric Larson <[email protected]>
# Ross Maddox <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .base import BaseEstimator
from ..cuda import _setup_cuda_fft_multiply_repeated
from ..filter import next_fast_len
from ..fixes import jit
from ..parallel import check_n_jobs
from ..utils import warn, ProgressBar, logger
def _compute_corrs(X, y, smin, smax, n_jobs=1, fit_intercept=False,
edge_correction=True):
"""Compute auto- and cross-correlations."""
if fit_intercept:
# We could do this in the Fourier domain, too, but it should
# be a bit cleaner numerically to do it here.
X_offset = np.mean(X, axis=0)
y_offset = np.mean(y, axis=0)
if X.ndim == 3:
X_offset = X_offset.mean(axis=0)
y_offset = np.mean(y_offset, axis=0)
X = X - X_offset
y = y - y_offset
else:
X_offset = y_offset = 0.
if X.ndim == 2:
assert y.ndim == 2
X = X[:, np.newaxis, :]
y = y[:, np.newaxis, :]
assert X.shape[:2] == y.shape[:2]
len_trf = smax - smin
len_x, n_epochs, n_ch_x = X.shape
len_y, n_epochs_y, n_ch_y = y.shape
assert len_x == len_y
n_fft = next_fast_len(2 * X.shape[0] - 1)
n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(
n_jobs, [1.], n_fft, 'correlation calculations')
# create our Toeplitz indexer
ij = np.empty((len_trf, len_trf), int)
for ii in range(len_trf):
ij[ii, ii:] = np.arange(len_trf - ii)
x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1)
ij[ii + 1:, ii] = x
x_xt = np.zeros([n_ch_x * len_trf] * 2)
x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x)
logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x))
pb = ProgressBar(n, mesg='Sample')
count = 0
pb.update(count)
for ei in range(n_epochs):
this_X = X[:, ei, :]
# XXX maybe this is what we should parallelize over CPUs at some point
X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0)
X_fft_conj = X_fft.conj()
y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0)
for ch0 in range(n_ch_x):
for oi, ch1 in enumerate(range(ch0, n_ch_x)):
this_result = cuda_dict['irfft'](
X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0)
# Our autocorrelation structure is a Toeplitz matrix, but
# it's faster to create the Toeplitz ourselves than use
# linalg.toeplitz.
this_result = this_result[ij]
# However, we need to adjust for coeffs that are cut off,
# i.e. the non-zero delays should not have the same AC value
# as the zero-delay ones (because they actually have fewer
# coefficients).
#
# These adjustments also follow a Toeplitz structure, so we
# construct a matrix of what has been left off, compute their
# inner products, and remove them.
if edge_correction:
_edge_correct(this_result, this_X, smax, smin, ch0, ch1)
# Store the results in our output matrix
x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
if ch0 != ch1:
x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
count += 1
pb.update(count)
# compute the crosscorrelations
cc_temp = cuda_dict['irfft'](
y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0)
if smin < 0 and smax >= 0:
x_y[:-smin, ch0] += cc_temp[smin:]
x_y[len_trf - smax:, ch0] += cc_temp[:smax]
else:
x_y[:, ch0] += cc_temp[smin:smax]
count += 1
pb.update(count)
x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
return x_xt, x_y, n_ch_x, X_offset, y_offset
@jit()
def _edge_correct(this_result, this_X, smax, smin, ch0, ch1):
if smax > 0:
tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0],
this_X[-1:-smax:-1, ch1])
if smin > 0:
tail = tail[smin - 1:, smin - 1:]
this_result[max(-smin + 1, 0):, max(-smin + 1, 0):] -= tail
if smin < 0:
head = _toeplitz_dot(this_X[:-smin, ch0],
this_X[:-smin, ch1])[::-1, ::-1]
if smax < 0:
head = head[:smax, :smax]
this_result[:-smin, :-smin] -= head
@jit()
def _toeplitz_dot(a, b):
"""Create upper triangular Toeplitz matrices & compute the dot product."""
# This is equivalent to:
# a = linalg.toeplitz(a)
# b = linalg.toeplitz(b)
# a[np.triu_indices(len(a), 1)] = 0
# b[np.triu_indices(len(a), 1)] = 0
# out = np.dot(a.T, b)
assert a.shape == b.shape and a.ndim == 1
out = np.outer(a, b)
for ii in range(1, len(a)):
out[ii, ii:] += out[ii - 1, ii - 1:-1]
out[ii + 1:, ii] += out[ii:-1, ii - 1]
return out
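# --- Editor's addition: hedged verification sketch. ---
# The comments above state that _toeplitz_dot(a, b) is equivalent to forming lower-
# triangular Toeplitz matrices from a and b and taking their matrix product. The helper
# below (an assumed name, not part of MNE) simply spells that claimed equivalence out
# with scipy.linalg.toeplitz so it can be checked on small inputs.
def _check_toeplitz_dot(a, b):
    from scipy import linalg
    A = linalg.toeplitz(a)
    B = linalg.toeplitz(b)
    A[np.triu_indices(len(a), 1)] = 0
    B[np.triu_indices(len(a), 1)] = 0
    expected = np.dot(A.T, B)
    return np.allclose(_toeplitz_dot(a, b), expected)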
def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
normed=False):
"""Compute regularization parameter from neighbors."""
from scipy import linalg
from scipy.sparse.csgraph import laplacian
known_types = ('ridge', 'laplacian')
if isinstance(reg_type, str):
reg_type = (reg_type,) * 2
if len(reg_type) != 2:
raise ValueError('reg_type must have two elements, got %s'
% (len(reg_type),))
for r in reg_type:
if r not in known_types:
raise ValueError('reg_type entries must be one of %s, got %s'
% (known_types, r))
reg_time = (reg_type[0] == 'laplacian' and n_delays > 1)
reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1)
if not reg_time and not reg_chs:
return np.eye(n_ch_x * n_delays)
# regularize time
if reg_time:
reg = np.eye(n_delays)
stride = n_delays + 1
reg.flat[1::stride] += -1
reg.flat[n_delays::stride] += -1
reg.flat[n_delays + 1:-n_delays - 1:stride] += 1
args = [reg] * n_ch_x
reg = linalg.block_diag(*args)
else:
reg = np.zeros((n_delays * n_ch_x,) * 2)
# regularize features
if reg_chs:
block = n_delays * n_delays
row_offset = block * n_ch_x
stride = n_delays * n_ch_x + 1
reg.flat[n_delays:-row_offset:stride] += -1
reg.flat[n_delays + row_offset::stride] += 1
reg.flat[row_offset:-n_delays:stride] += -1
reg.flat[:-(n_delays + row_offset):stride] += 1
assert np.array_equal(reg[::-1, ::-1], reg)
if method == 'direct':
if normed:
norm = np.sqrt(np.diag(reg))
reg /= norm
reg /= norm[:, np.newaxis]
return reg
else:
# Use csgraph. Note that our -1's above are really the neighbors!
# If we ever want to allow arbitrary adjacency matrices, this is how
# we'd want to do it.
reg = laplacian(-reg, normed=normed)
return reg
def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
"""Fit the model using correlation matrices."""
# do the regularized solving
from scipy import linalg
n_ch_out = x_y.shape[1]
assert x_y.shape[0] % n_ch_x == 0
n_delays = x_y.shape[0] // n_ch_x
reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type)
mat = x_xt + alpha * reg
# From sklearn
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
w = linalg.solve(mat, x_y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warn('Singular matrix in solving dual problem. Using '
'least-squares solution instead.')
w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0]
w = w.T.reshape([n_ch_out, n_ch_in, n_delays])
return w
class TimeDelayingRidge(BaseEstimator):
"""Ridge regression of data with time delays.
Parameters
----------
tmin : int | float
The starting lag, in seconds (or samples if ``sfreq`` == 1).
Negative values correspond to times in the past.
tmax : int | float
The ending lag, in seconds (or samples if ``sfreq`` == 1).
Positive values correspond to times in the future.
Must be >= tmin.
sfreq : float
The sampling frequency used to convert times into samples.
alpha : float
The ridge (or laplacian) regularization factor.
reg_type : str | list
Can be "ridge" (default) or "laplacian".
Can also be a 2-element list specifying how to regularize in time
and across adjacent features.
fit_intercept : bool
If True (default), the sample mean is removed before fitting.
n_jobs : int | str
The number of jobs to use. Can be an int (default 1) or ``'cuda'``.
.. versionadded:: 0.18
edge_correction : bool
If True (default), correct the autocorrelation coefficients for
non-zero delays for the fact that fewer samples are available.
Disabling this speeds up performance at the cost of accuracy
depending on the relationship between epoch length and model
duration. Only used if ``estimator`` is float or None.
.. versionadded:: 0.18
See Also
--------
mne.decoding.ReceptiveField
Notes
-----
This class is meant to be used with :class:`mne.decoding.ReceptiveField`
by only implicitly doing the time delaying. For reasonable receptive
field and input signal sizes, it should be more CPU and memory
efficient by using frequency-domain methods (FFTs) to compute the
auto- and cross-correlations.
"""
_estimator_type = "regressor"
def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge',
fit_intercept=True, n_jobs=1, edge_correction=True):
if tmin > tmax:
raise ValueError('tmin must be <= tmax, got %s and %s'
% (tmin, tmax))
self.tmin = float(tmin)
self.tmax = float(tmax)
self.sfreq = float(sfreq)
self.alpha = float(alpha)
self.reg_type = reg_type
self.fit_intercept = fit_intercept
self.edge_correction = edge_correction
self.n_jobs = n_jobs
@property
def _smin(self):
return int(round(self.tmin * self.sfreq))
@property
def _smax(self):
return int(round(self.tmax * self.sfreq)) + 1
def fit(self, X, y):
"""Estimate the coefficients of the linear model.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples[, n_epochs], n_outputs)
The target values.
Returns
-------
self : instance of TimeDelayingRidge
Returns the modified instance.
"""
if X.ndim == 3:
assert y.ndim == 3
assert X.shape[:2] == y.shape[:2]
else:
assert X.ndim == 2 and y.ndim == 2
assert X.shape[0] == y.shape[0]
n_jobs = check_n_jobs(self.n_jobs, allow_cuda=True)
# These are split into two functions because it's possible that we
# might want to allow people to do them separately (e.g., to test
# different regularization parameters).
self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs(
X, y, self._smin, self._smax, n_jobs, self.fit_intercept,
self.edge_correction)
self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x,
self.reg_type, self.alpha, n_ch_x)
# This is the sklearn formula from LinearModel (will be 0. for no fit)
if self.fit_intercept:
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T)
else:
self.intercept_ = 0.
return self
def predict(self, X):
"""Predict the output.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The data.
Returns
-------
X : ndarray
The predicted response.
"""
if X.ndim == 2:
X = X[:, np.newaxis, :]
singleton = True
else:
singleton = False
out = np.zeros(X.shape[:2] + (self.coef_.shape[0],))
smin = self._smin
offset = max(smin, 0)
for ei in range(X.shape[1]):
for oi in range(self.coef_.shape[0]):
for fi in range(self.coef_.shape[1]):
temp = np.convolve(X[:, ei, fi], self.coef_[oi, fi])
temp = temp[max(-smin, 0):][:len(out) - offset]
out[offset:len(temp) + offset, ei, oi] += temp
out += self.intercept_
if singleton:
out = out[:, 0, :]
return out
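# --- Editor's addition: hedged usage sketch. ---
# A minimal example of fitting the TimeDelayingRidge estimator defined above on random
# 2D data, following the shapes documented in fit()/predict(). The sizes and
# hyperparameters are illustrative only and are not taken from the MNE examples.
def _example_time_delaying_ridge():
    rng = np.random.RandomState(0)
    X = rng.randn(1000, 3)   # (n_samples, n_features)
    y = rng.randn(1000, 2)   # (n_samples, n_outputs)
    tdr = TimeDelayingRidge(tmin=-0.1, tmax=0.2, sfreq=100., alpha=1.)
    tdr.fit(X, y)
    return tdr.predict(X)    # shape (n_samples, n_outputs)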
|
bsd-3-clause
|
warmspringwinds/scikit-image
|
doc/examples/plot_phase_unwrap.py
|
14
|
4080
|
"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem. One-, two- and three dimensional images can all be unwrapped using
skimage. Here we will demonstrate phase unwrapping in the two dimensional case.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
from skimage.restoration import unwrap_phase
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = unwrap_phase(image_wrapped)
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(image, cmap='gray', vmin=0, vmax=4 * np.pi), ax=ax1)
ax1.set_title('Original')
fig.colorbar(ax2.imshow(image_wrapped, cmap='gray', vmin=-np.pi, vmax=np.pi), ax=ax2)
ax2.set_title('Wrapped phase')
fig.colorbar(ax3.imshow(image_unwrapped, cmap='gray'), ax=ax3)
ax3.set_title('After phase unwrapping')
fig.colorbar(ax4.imshow(image_unwrapped - image, cmap='gray'), ax=ax4)
ax4.set_title('Unwrapped minus original')
"""
.. image:: PLOT2RST.current_figure
The unwrapping procedure accepts masked arrays, and can also optionally
assume cyclic boundaries to connect edges of an image. In the example below,
we study a simple phase ramp which has been split in two by masking
a row of the image.
"""
# Create a simple ramp
image = np.ones((100, 100)) * np.linspace(0, 8 * np.pi, 100).reshape((-1, 1))
# Mask the image to split it in two horizontally
mask = np.zeros_like(image, dtype=np.bool)
mask[image.shape[0] // 2, :] = True
image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
# Unwrap image without wrap around
image_unwrapped_no_wrap_around = unwrap_phase(image_wrapped,
wrap_around=(False, False))
# Unwrap with wrap around enabled for the 0th dimension
image_unwrapped_wrap_around = unwrap_phase(image_wrapped,
wrap_around=(True, False))
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(np.ma.array(image, mask=mask), cmap='jet'), ax=ax1)
ax1.set_title('Original')
fig.colorbar(ax2.imshow(image_wrapped, cmap='jet', vmin=-np.pi, vmax=np.pi),
ax=ax2)
ax2.set_title('Wrapped phase')
fig.colorbar(ax3.imshow(image_unwrapped_no_wrap_around, cmap='jet'),
ax=ax3)
ax3.set_title('Unwrapped without wrap_around')
fig.colorbar(ax4.imshow(image_unwrapped_wrap_around, cmap='jet'), ax=ax4)
ax4.set_title('Unwrapped with wrap_around')
plt.show()
"""
.. image:: PLOT2RST.current_figure
In the figures above, the masked row can be seen as a white line across
the image. The difference between the two unwrapped images in the bottom row
is clear: Without wrap around (lower left), the regions above and below the
masked boundary do not interact at all, resulting in an offset between the
two regions of an arbitrary integer times two pi. We could just as well have
unwrapped the regions as two separate images. With wrap around enabled for the
vertical direction (lower right), the situation changes: Unwrapping paths are
now allowed to pass from the bottom to the top of the image and vice versa, in
effect providing a way to determine the offset between the two regions.
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
bsd-3-clause
|
etkirsch/scikit-learn
|
sklearn/linear_model/tests/test_randomized_l1.py
|
214
|
4690
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
The-Fonz/xfoil-optimization-toolbox
|
optimize_for_cl_nurbs.py
|
1
|
5143
|
"""
Test of PSO algorithm in combination with XFoil and NURBS Airfoil parametrization.
Trying to find high Re low drag airfoil.
"""
from __future__ import division, print_function
from os import remove
import numpy as np
from copy import copy
from string import ascii_uppercase
from random import choice
import matplotlib.pyplot as plt
from optimization_algorithms.pso import Particle
from airfoil_generators import nurbs
from xfoil import xfoil
Re = 300000
constraints = np.array((
# Parameter bounds, in the order expected by construct_airfoil():
# ta_u - upper-surface angle at the front (leading edge)
# ta_l - lower-surface angle at the front
# tb_u - upper-surface angle at the back (trailing edge)
# tb_l - lower-surface angle at the back
# alpha_b, alpha_c - trailing-edge direction parameters
(.01,.4), (.05,.4), (1,3),(0.05,3), (0.4,8), (1,10)
#(.1,.4), (.1,.4), (.1,2), (1,10)
#(.05,.15), (.05,.15), (.6,8), (1,1.3)
))
# Good parameters at:
# http://hvass-labs.org/people/magnus/publications/pedersen10good-pso.pdf
iterations, S, omega, theta_g, theta_p = 100, 12, -0.2, 2.8, 0
def construct_airfoil(*pts):
k = {}
k['ta_u'] = pts[0]
k['ta_l'] = pts[1]
k['tb_u'] = pts[2]
k['tb_l'] = pts[3]
k['alpha_b'] = pts[4]
k['alpha_c'] = pts[5]
return nurbs.NURBS(k)
def plot(argv, ax, score=None, title=None, style='r-'):
x_l = argv[0]
y_l = argv[1]
x_u = argv[2]
y_u = argv[3]
ax.set_xlim(0,1)
ax.plot(y_l, x_l, style, y_u, x_u, style, linewidth=2)
if score:
ax.annotate(str(score), (.4,0))
if title:
ax.set_title(title)
def get_coords_plain(argv):
x_l = argv[0]
y_l = argv[1]
x_u = argv[2]
y_u = argv[3]
ycoords = np.append(y_l[::-1], y_u[1:])
xcoords = np.append(x_l[::-1], x_u[1:])
coordslist = np.array((xcoords, ycoords)).T
coordstrlist = ["{:.6f} {:.6f}".format(coord[1], coord[0])
for coord in coordslist]
return '\n'.join(coordstrlist)
def score_airfoil(airfoil):
# Make unique filename
randstr = ''.join(choice(ascii_uppercase) for i in range(20))
filename = "parsec_{}.dat".format(randstr)
# Save coordinates
with open(filename, 'w') as af:
af.write(get_coords_plain(airfoil._spline()))
#Let Xfoil do its magic
polar = xfoil.oper_visc_cl(filename,0,Re,
iterlim =80, show_seconds =0)
polar2 = xfoil.oper_visc_cl(filename,0.4,Re,
iterlim =80, show_seconds =0)
try:
remove(filename)
except WindowsError:
print("\n\n\n\nWindows was not capable of removing the file.\n\n\n\n")
try:
score = polar[0][0][2] * 0.5 + polar2[0][0][2] * 0.5
print("Score: ", score)
# If it's not NaN
if np.isfinite(score):
print("Return Score")
return score
else:
print("Return None")
return None
except IndexError:
print("Return None (IndexError)")
return None
# Show plot and make redrawing possible
fig, (cur_afplt, lastpbest_afplt, gbest_afplt, score_plt) = plt.subplots(4,1)
# Enable auto-clearing
cur_afplt.hold(False)
lastpbest_afplt.hold(False)
gbest_afplt.hold(False)
cur_afplt.axis('equal')
lastpbest_afplt.axis('equal')
gbest_afplt.axis('equal')
plt.tight_layout()
# Interactive mode
plt.ion()
#plt.pause(.0001)
# Initialize globals
global_bestscore = None
global_bestpos = None
global_bestairfoil = None
# Constructing a particle automatically initializes position and speed
particles = [Particle(constraints) for i in xrange(0, S)]
scores_y = []
for n in xrange(iterations+1):
print("\nIteration {}".format(n))
for i_par, particle in enumerate(particles):
# Keep scoring until converged
score = None
while not score:
if global_bestscore:
print("Update Particle")
particle.update(global_bestpos,omega,theta_p,theta_g)
airfoil = construct_airfoil(*particle.pts)
score = score_airfoil(airfoil)
plotstyle = "{}-".format(choice("rgb"))
af = airfoil._spline()
plot(af,cur_afplt, score="Cd {}".format(score), style=plotstyle,
title="Current, particle n{}p{}".format(n, i_par))
if not score and (not global_bestscore or n==0):
print("Not converged, no global best, or first round. Randomizing particle.")
particle.randomize()
elif not score:
print("Not converged, there is a global best. Randomizing.")
particle.randomize()
if not particle.bestscore or score < particle.bestscore:
particle.new_best(score)
txt = 'particle best'
plot(af,lastpbest_afplt, score="Cd {}".format(score), style=plotstyle,
title="Particle best, particle n{}p{}".format(n, i_par))
print("Found particle best, score {}".format(score))
if not global_bestscore or score < global_bestscore:
global_bestscore = score
# Copy to avoid global_bestpos becoming a reference to the particle's array
global_bestpos = copy(particle.pts)
txt = 'global best'
plot(af, gbest_afplt, score="Cd {}".format(score), style=plotstyle,
title="Global best, particle n{}p{}".format(n, i_par))
#plt.pause(.0001)
print("Found global best, score {}".format(score))
global_bestairfoil = airfoil
scores_y.append(global_bestscore)
score_plt.plot(scores_y, 'r-')
score_plt.set_title("Global best per round")
plt.pause(.0001)
print("Best airfoil found for Re={}, ".format(Re),
"score = ", global_bestscore,
", pos = ", global_bestpos.__repr__(),
", airfoil points:\n{}".format(get_coords_plain(af)))
plt.show()
|
unlicense
|
tawsifkhan/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
d-mittal/pystruct
|
pystruct/learners/frankwolfe_ssvm.py
|
3
|
11655
|
######################
# Authors:
# Xianghang Liu <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3-clause
#
# Implements structured SVM as described in Joachims et. al.
# Cutting-Plane Training of Structural SVMs
import warnings
from time import time
import numpy as np
from sklearn.utils import check_random_state
from pystruct.learners.ssvm import BaseSSVM
from pystruct.utils import find_constraint
class FrankWolfeSSVM(BaseSSVM):
"""Structured SVM solver using Block-coordinate Frank-Wolfe.
This implementation is somewhat experimental. Use with care.
References
----------
* Lacoste-Julien, Jaggi, Schmidt, Pletscher:
Block-Coordinate Frank-Wolfe Optimization for Structural SVMs,
JMLR 2013
With batch_mode=False, this implements the online (block-coordinate)
version of the algorithm (BCFW)
BCFW is an attractive alternative to subgradient methods, as no
learning rate is needed and a duality gap guarantee is given.
Parameters
----------
model : StructuredModel
Object containing the model structure. Has to implement
`loss`, `inference` and `loss_augmented_inference`.
max_iter : int, default=1000
Maximum number of passes over dataset to find constraints.
C : float, default=1
Regularization parameter. Corresponds to 1 / (lambda * n_samples).
verbose : int
Verbosity.
n_jobs : int, default=1
Number of parallel processes. Currently only n_jobs=1 is supported.
show_loss_every : int, default=0
How often the training set loss should be computed.
Zero corresponds to never.
tol : float, default=1e-3
Convergence tolerance on the duality gap.
logger : logger object, default=None
Pystruct logger for storing the model or extracting additional
information.
batch_mode : boolean, default=False
Whether to use batch updates. Will slow down learning enormously.
line_search : boolean, default=True
Whether to compute the optimum step size in each step.
The line search is computed in closed form and is cheap.
There is usually no reason to turn this off.
check_dual_every : int, default=10
How often the stopping criterion should be checked. Computing
the stopping criterion is as costly as doing one pass over the dataset,
so check_dual_every=1 will make learning twice as slow.
do_averaging : bool, default=True
Whether to use weight averaging as described in the reference paper.
Currently this is only supported in the block-coordinate version.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
w : nd-array, shape=(model.size_joint_feature,)
The learned weights of the SVM.
``loss_curve_`` : list of float
List of loss values if show_loss_every > 0.
``objective_curve_`` : list of float
Cutting plane objective after each pass through the dataset.
``primal_objective_curve_`` : list of float
Primal objective after each pass through the dataset.
``timestamps_`` : list of int
Total training time stored before each iteration.
"""
def __init__(self, model, max_iter=1000, C=1.0, verbose=0, n_jobs=1,
show_loss_every=0, logger=None, batch_mode=False,
line_search=True, check_dual_every=10, tol=.001,
do_averaging=True, sample_method='perm', random_state=None):
if n_jobs != 1:
warnings.warn("FrankWolfeSSVM does not support multiprocessing"
" yet. Ignoring n_jobs != 1.")
if sample_method not in ['perm', 'rnd', 'seq']:
raise ValueError("sample_method can only be perm, rnd, or seq")
BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
n_jobs=n_jobs, show_loss_every=show_loss_every,
logger=logger)
self.tol = tol
self.batch_mode = batch_mode
self.line_search = line_search
self.check_dual_every = check_dual_every
self.do_averaging = do_averaging
self.sample_method = sample_method
self.random_state = random_state
def _calc_dual_gap(self, X, Y):
n_samples = len(X)
joint_feature_gt = self.model.batch_joint_feature(X, Y, Y) # FIXME don't calculate this again
Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,
relaxed=True)
djoint_feature = joint_feature_gt - self.model.batch_joint_feature(X, Y_hat)
ls = np.sum(self.model.batch_loss(Y, Y_hat))
ws = djoint_feature * self.C
l_rescaled = self.l * n_samples * self.C
dual_val = -0.5 * np.sum(self.w ** 2) + l_rescaled
w_diff = self.w - ws
dual_gap = w_diff.T.dot(self.w) - l_rescaled + ls * self.C
primal_val = dual_val + dual_gap
return dual_val, dual_gap, primal_val
def _frank_wolfe_batch(self, X, Y):
"""Batch Frank-Wolfe learning.
This is basically included for reference / comparison only,
as the block-coordinate version is much faster.
Compare Algorithm 2 in the reference paper.
"""
l = 0.0
n_samples = float(len(X))
joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
for iteration in range(self.max_iter):
Y_hat = self.model.batch_loss_augmented_inference(X, Y, self.w,
relaxed=True)
djoint_feature = joint_feature_gt - self.model.batch_joint_feature(X, Y_hat)
ls = np.mean(self.model.batch_loss(Y, Y_hat))
ws = djoint_feature * self.C
w_diff = self.w - ws
dual_gap = 1.0 / (self.C * n_samples) * w_diff.T.dot(self.w) - l + ls
# line search for gamma
if self.line_search:
eps = 1e-15
gamma = dual_gap / (np.sum(w_diff ** 2) / (self.C * n_samples) + eps)
gamma = max(0.0, min(1.0, gamma))
else:
gamma = 2.0 / (iteration + 2.0)
dual_val = -0.5 * np.sum(self.w ** 2) + l * (n_samples * self.C)
dual_gap_display = dual_gap * n_samples * self.C
primal_val = dual_val + dual_gap_display
self.primal_objective_curve_.append(primal_val)
self.objective_curve_.append(dual_val)
self.timestamps_.append(time() - self.timestamps_[0])
if self.verbose > 0:
print("iteration %d, dual: %f, dual_gap: %f, primal: %f, gamma: %f"
% (iteration, dual_val, dual_gap_display, primal_val, gamma))
# update w and l
self.w = (1.0 - gamma) * self.w + gamma * ws
l = (1.0 - gamma) * l + gamma * ls
if self.logger is not None:
self.logger(self, iteration)
if dual_gap < self.tol:
return
def _frank_wolfe_bc(self, X, Y):
"""Block-Coordinate Frank-Wolfe learning.
Compare Algorithm 3 in the reference paper.
"""
n_samples = len(X)
w = self.w.copy()
w_mat = np.zeros((n_samples, self.model.size_joint_feature))
l_mat = np.zeros(n_samples)
l = 0.0
k = 0
rng = check_random_state(self.random_state)
for iteration in range(self.max_iter):
if self.verbose > 0:
print("Iteration %d" % iteration)
perm = np.arange(n_samples)
if self.sample_method == 'perm':
rng.shuffle(perm)
elif self.sample_method == 'rnd':
perm = rng.randint(low=0, high=n_samples, size=n_samples)
for j in range(n_samples):
i = perm[j]
x, y = X[i], Y[i]
y_hat, delta_joint_feature, slack, loss = find_constraint(self.model, x, y, w)
# ws and ls
ws = delta_joint_feature * self.C
ls = loss / n_samples
# line search
if self.line_search:
eps = 1e-15
w_diff = w_mat[i] - ws
gamma = (w_diff.T.dot(w) - (self.C * n_samples)*(l_mat[i] - ls)) / (np.sum(w_diff ** 2) + eps)
gamma = max(0.0, min(1.0, gamma))
else:
gamma = 2.0 * n_samples / (k + 2.0 * n_samples)
w -= w_mat[i]
w_mat[i] = (1.0 - gamma) * w_mat[i] + gamma * ws
w += w_mat[i]
l -= l_mat[i]
l_mat[i] = (1.0 - gamma) * l_mat[i] + gamma * ls
l += l_mat[i]
if self.do_averaging:
rho = 2. / (k + 2.)
self.w = (1. - rho) * self.w + rho * w
self.l = (1. - rho) * self.l + rho * l
else:
self.w = w
self.l = l
k += 1
if (self.check_dual_every != 0) and (iteration % self.check_dual_every == 0):
dual_val, dual_gap, primal_val = self._calc_dual_gap(X, Y)
self.primal_objective_curve_.append(primal_val)
self.objective_curve_.append(dual_val)
self.timestamps_.append(time() - self.timestamps_[0])
if self.verbose > 0:
print("dual: %f, dual_gap: %f, primal: %f"
% (dual_val, dual_gap, primal_val))
if self.logger is not None:
self.logger(self, iteration)
if dual_gap < self.tol:
return
def fit(self, X, Y, constraints=None, initialize=True):
"""Learn parameters using (block-coordinate) Frank-Wolfe learning.
Parameters
----------
X : iterable
Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
constraints : ignored
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
"""
if initialize:
self.model.initialize(X, Y)
self.objective_curve_, self.primal_objective_curve_ = [], []
self.timestamps_ = [time()]
self.w = getattr(self, "w", np.zeros(self.model.size_joint_feature))
self.l = getattr(self, "l", 0)
try:
if self.batch_mode:
self._frank_wolfe_batch(X, Y)
else:
self._frank_wolfe_bc(X, Y)
except KeyboardInterrupt:
pass
if self.verbose:
print("Calculating final objective.")
self.timestamps_.append(time() - self.timestamps_[0])
self.primal_objective_curve_.append(self._objective(X, Y))
self.objective_curve_.append(self.objective_curve_[-1])
if self.logger is not None:
self.logger(self, 'final')
return self
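# --- Editor's addition: hedged usage sketch. ---
# A minimal illustration of driving the learner above on a toy multi-class problem.
# It assumes pystruct's MultiClassClf model and the predict() helper inherited from
# BaseSSVM; only FrankWolfeSSVM itself is defined in this file, so treat the rest as
# an assumption rather than a canonical example.
def _example_frank_wolfe_ssvm():
    from pystruct.models import MultiClassClf
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    Y = (X[:, 0] > 0).astype(int)
    model = MultiClassClf(n_features=5, n_classes=2)
    learner = FrankWolfeSSVM(model=model, C=1., max_iter=50, tol=1e-3)
    learner.fit(X, Y)
    return learner.predict(X)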
|
bsd-2-clause
|
williampma/opencog
|
opencog/python/spatiotemporal/temporal_events/membership_function.py
|
34
|
4673
|
from math import fabs
from random import random
from scipy.stats.distributions import rv_frozen
from spatiotemporal.time_intervals import TimeInterval
from spatiotemporal.unix_time import random_time, UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.functions import Function, FunctionPiecewiseLinear,\
FunctionHorizontalLinear, FunctionComposite, FUNCTION_ZERO, FUNCTION_ONE, FunctionLinear
from numpy import PINF as POSITIVE_INFINITY, NINF as NEGATIVE_INFINITY
from utility.numeric.globals import EPSILON
__author__ = 'keyvan'
class MembershipFunction(Function):
def __init__(self, temporal_event):
Function.__init__(self, function_undefined=FUNCTION_ZERO, domain=temporal_event)
def call_on_single_point(self, time_step):
return self.domain.distribution_beginning.cdf(time_step) - self.domain.distribution_ending.cdf(time_step)
class ProbabilityDistributionPiecewiseLinear(list, TimeInterval, rv_frozen):
dist = 'ProbabilityDistributionPiecewiseLinear'
_mean = None
asd = None
def __init__(self, dictionary_input_output):
cdf_input_list, cdf_output_list = convert_dict_to_sorted_lists(dictionary_input_output)
list.__init__(self, cdf_input_list)
TimeInterval.__init__(self, self[0], self[-1], 2)
self.cdf = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
self.cdf.dictionary_bounds_function[(self.b, POSITIVE_INFINITY)] = FUNCTION_ONE
pdf_output_list = []
dictionary_bounds_function = {}
for bounds in sorted(self.cdf.dictionary_bounds_function):
a, b = bounds
if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] or b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
continue
pdf_y_intercept = fabs(self.cdf.derivative((a + b) / 2.0))
pdf_output_list.append(pdf_y_intercept)
dictionary_bounds_function[bounds] = FunctionHorizontalLinear(pdf_y_intercept)
self.pdf = FunctionComposite(dictionary_bounds_function, function_undefined=FUNCTION_ZERO, domain=self,
is_normalised=True)
self.roulette_wheel = []
self._mean = 0
for bounds in sorted(self.pdf.dictionary_bounds_function):
(a, b) = bounds
if a in [NEGATIVE_INFINITY, POSITIVE_INFINITY] and b in [NEGATIVE_INFINITY, POSITIVE_INFINITY]:
continue
cdf = self.cdf.dictionary_bounds_function[bounds]
pdf = self.pdf.dictionary_bounds_function[bounds]
share = cdf(b)
self.roulette_wheel.append((a, b, share))
self._mean += (a + b) / 2.0 * pdf(a) * (b - a)
def std(self):
# Not properly implemented
return 0
def stats(self, moments='mv'):
# Not properly implemented
# m, s, k
return self.mean(), 0, 0
def mean(self):
return self._mean
def interval(self, alpha):
if alpha == 1:
return self.a, self.b
raise NotImplementedError("'interval' is not implemented for 'alpha' other than 1")
def rvs(self, size=None):
if size is None:
size = 1
else:
assert isinstance(size, int)
result = []
start, end = 0, 0
for i in xrange(size):
rand = random()
for a, b, share in self.roulette_wheel:
if rand < share:
start, end = a, b
break
result.append(random_time(start, end))
if size == 1:
return result[0]
return result
# def plot(self):
# import matplotlib.pyplot as plt
# x_axis, y_axis = [], []
# for time_step in self:
# x_axis.append(UnixTime(time_step - EPSILON).to_datetime())
# x_axis.append(UnixTime(time_step + EPSILON).to_datetime())
# y_axis.append(self.pdf(time_step - EPSILON))
# y_axis.append(self.pdf(time_step + EPSILON))
# plt.plot(x_axis, y_axis)
# return plt
def plot(self):
import matplotlib.pyplot as plt
x_axis, y_axis = [], []
for time_step in self:
x_axis.append(time_step - EPSILON)
x_axis.append(time_step + EPSILON)
y_axis.append(self.pdf(time_step - EPSILON))
y_axis.append(self.pdf(time_step + EPSILON))
plt.plot(x_axis, y_axis)
return plt
def __hash__(self):
return object.__hash__(self)
def __repr__(self):
return TimeInterval.__repr__(self)
def __str__(self):
return repr(self)
|
agpl-3.0
|
phoebe-project/phoebe2-docs
|
2.1/examples/binary_spots.py
|
1
|
1686
|
#!/usr/bin/env python
# coding: utf-8
# Binary with Spots
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Model without Spots
# --------------------------
# In[3]:
b.add_dataset('lc', times=phoebe.linspace(0,1,101))
# In[4]:
b.run_compute(irrad_method='none', model='no_spot')
# Adding Spots
# ---------------------
# Let's add a spot to the primary component in our binary.
#
# The 'colat' parameter defines the colatitude on the star measured from its North (spin) Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the other star at t0. See the [spots tutorial](../tutorials/spots.ipynb) for more details.
# In[5]:
b.add_feature('spot', component='primary', feature='spot01', relteff=0.9, radius=30, colat=45, long=90)
# In[6]:
b.run_compute(irrad_method='none', model='with_spot')
# Comparing Light Curves
# ------------------------------
# In[7]:
afig, mplfig = b.plot(show=True, legend=True)
# In[ ]:
|
gpl-3.0
|
darribas/pysal
|
pysal/contrib/pdutilities/dbf_utilities.py
|
3
|
5707
|
"""miscellaneous file manipulation utilities
"""
import numpy as np
import pysal as ps
import pandas as pd
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
"""
return list(set([x for x in li if li.count(x) > 1]))
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
db = ps.open(dbfpath,'r')
li = db.by_col(idvar)
return list(set([x for x in li if li.count(x) > 1]))
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
* int: ('N', 14, 0)
* float: ('N', 14, 14)
* str: ('C', 14, 0)
'''
if my_specs:
specs = my_specs
else:
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps.open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps.open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
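# --- Editor's addition: hedged usage sketch. ---
# A small round trip using the helpers above: write a pandas DataFrame out with df2dbf
# and read it back with dbf2df. The file name is illustrative only, and the sketch
# relies solely on the functions defined in this module.
def _example_dbf_roundtrip():
    df = pd.DataFrame({'ID': [1, 2, 3], 'VALUE': [0.5, 1.5, 2.5]})
    out_path = df2dbf(df, 'example_roundtrip.dbf')
    return dbf2df(out_path)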
|
bsd-3-clause
|
agadiraju/519finalproject
|
svm/test_svms_count.py
|
1
|
1138
|
print(__doc__)
import numpy as np
from sklearn import metrics
from sklearn import datasets
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from datetime import datetime
from import_train import rmsle
from import_train import import_training_file
import sys
if __name__ == '__main__':
(X, y) = import_training_file(sys.argv[1], True)
n,d = X.shape
nTrain = int(0.5*n)
# shuffle the data
#idx = np.arange(n)
#np.random.seed(42)
#np.random.shuffle(idx)
#y = y[idx]
#X = X[idx]
# split the data
Xtrain = X[:nTrain,:]
ytrain = y[:nTrain]
Xtest = X[nTrain:,:]
ytest = y[nTrain:]
#linear
clf = SVC(kernel='linear')
clf.fit(Xtrain,ytrain)
pred = clf.predict(Xtest)
print "RMSE linear = ", rmsle(ytest, pred)
#polynomial
clf = SVC(kernel='poly')
clf.fit(Xtrain,ytrain)
pred = clf.predict(Xtest)
print "RMSE poly = ", rmsle(ytest, pred)
#rbf
clf = SVC(kernel='rbf')
clf.fit(Xtrain,ytrain)
pred = clf.predict(Xtest)
print "RMSE rbf = ", rmsle(ytest, pred)
#sigmoid
clf = SVC(kernel='sigmoid')
clf.fit(Xtrain,ytrain)
pred = clf.predict(Xtest)
print "RMSE sigmoid = ", rmsle(ytest, pred)
|
mit
|
crichardson17/starburst_atlas
|
Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_5/fullgrid/Optical2.py
|
30
|
9412
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
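# Worked example of the scaling above (illustrative comment only): a line ten
# times weaker than H-beta has ratio 0.1, so the stored value is
# log10(4860 * 0.1) = log10(486) ~= 2.69; ratios below 1/4860 would give a
# negative log and are left at zero instead.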
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] = 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[int(max_values[j,1])]
max_values[j,3] = phi_values[int(max_values[j,1])]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Optical_lines_cntd.pdf')
plt.clf()
print "figure saved"
|
gpl-2.0
|
andyh616/mne-python
|
mne/utils.py
|
3
|
62227
|
# -*- coding: utf-8 -*-
"""Some utility functions"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import warnings
import logging
import time
from distutils.version import LooseVersion
import os
import os.path as op
from functools import wraps
import inspect
from string import Formatter
import subprocess
import sys
import tempfile
import shutil
from shutil import rmtree
from math import log, ceil
import json
import ftplib
import hashlib
from functools import partial
import atexit
import numpy as np
import scipy
from scipy import linalg, sparse
from .externals.six.moves import urllib
from .externals.six import string_types, StringIO, BytesIO
from .externals.decorator import decorator
from .fixes import isclose
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
def _memory_usage(*args, **kwargs):
if isinstance(args[0], tuple):
args[0][0](*args[0][1], **args[0][2])
elif not isinstance(args[0], int): # can be -1 for current use
args[0]()
return [-1]
try:
from memory_profiler import memory_usage
except ImportError:
memory_usage = _memory_usage
def nottest(f):
"""Decorator to mark a function as not a test"""
f.__test__ = False
return f
###############################################################################
# RANDOM UTILITIES
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_hash(x, h=None):
"""Hash a reasonable python object
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if isinstance(x, dict):
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, (list, tuple)):
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (string_types, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, np.ndarray):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tostring())
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
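# Hedged usage sketch (comment only, not part of the original API docs):
# equal nested contents give equal digests, so hashes can be compared cheaply.
#
#   >>> d = dict(a=[1, 2.5, 'x'], b=np.arange(3))
#   >>> object_hash(d) == object_hash(dict(a=[1, 2.5, 'x'], b=np.arange(3)))
#   True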
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be the same type as a.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' a is None, b is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
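# Hedged usage sketch (comment only): an empty string means no differences
# were found, anything else describes the first mismatching entries.
#
#   >>> object_diff(dict(a=1), dict(a=1)) == ''
#   True
#   >>> 'value mismatch' in object_diff(dict(a=1), dict(a=2))
#   True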
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def split_list(l, n):
"""split list in n (approx) equal pieces"""
n = int(n)
sz = len(l) // n
for i in range(n - 1):
yield l[i * sz:(i + 1) * sz]
yield l[(n - 1) * sz:]
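# Hedged usage sketch (comment only): the last piece absorbs the remainder
# when len(l) is not divisible by n.
#
#   >>> list(split_list([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4, 5]]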
def create_chunks(sequence, size):
"""Generate chunks from a sequence
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
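# Hedged usage sketch (comment only): a generator is returned, so wrap it in
# list() to materialise the chunks.
#
#   >>> list(create_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]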
def sum_squared(X):
"""Compute norm of an array
Parameters
----------
X : array
Data whose norm must be found
Returns
-------
value : float
Sum of squares of the input array X
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
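# Hedged usage sketch (comment only): equivalent to np.sum(X ** 2), computed
# as a single dot product on the flattened array.
#
#   >>> sum_squared(np.array([1., 2., 3.]))
#   14.0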
def check_fname(fname, filetype, endings):
"""Enforce MNE filename conventions
Parameters
----------
fname : str
Name of the file.
filetype : str
Type of file. e.g., ICA, Epochs etc.
endings : tuple
Acceptable endings for the filename.
"""
print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])
if not fname.endswith(endings):
warnings.warn('This filename (%s) does not conform to MNE naming '
'conventions. All %s files should end with '
'%s' % (fname, filetype, print_endings))
class WrapStdOut(object):
"""Ridiculous class to work around how doctest captures stdout"""
def __getattr__(self, name):
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
return getattr(sys.stdout, name)
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
def __del__(self):
rmtree(self._path, ignore_errors=True)
def estimate_rank(data, tol=1e-4, return_singular=False,
norm=True, copy=True):
"""Helper to estimate the rank of data
This function will normalize the rows of the data (typically
channels or vertices) such that non-zero singular values
should be close to one.
Parameters
----------
data : array
Data to estimate the rank of (should be 2-dimensional).
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
norm : bool
If True, data will be scaled by their estimated row-wise norm.
Else data are assumed to be scaled. Defaults to True.
copy : bool
If False, values in data will be modified in-place during
rank estimation (saves memory).
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
if copy is True:
data = data.copy()
if norm is True:
norms = _compute_row_norms(data)
data /= norms[:, np.newaxis]
s = linalg.svd(data, compute_uv=False, overwrite_a=True)
rank = np.sum(s >= tol)
if return_singular is True:
return rank, s
else:
return rank
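# Hedged usage sketch (comment only): an identity matrix has unit singular
# values, so the estimated rank equals its size.
#
#   >>> estimate_rank(np.eye(3))
#   3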
def _compute_row_norms(data):
"""Compute scaling based on estimated norm"""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude
"""
from .epochs import _is_good
from .io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
class _FormatDict(dict):
"""Helper for pformat()"""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Partially format a template string.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
def trait_wraith(*args, **kwargs):
# Stand in for traits to allow importing traits based modules when the
# traits library is not installed
return lambda x: x
###############################################################################
# DECORATORS
# Following deprecated class copied from scikit-learn
# force show of DeprecationWarning even on python 2.7
warnings.simplefilter('default')
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses::
>>> from mne.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<mne.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra: string
To be added to the deprecation messages.
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
# scikit-learn will not import on all platforms b/c it can be
# sklearn or scikits.learn, so a self-contained example is used above
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call
Parameters
----------
obj : object
Object to call.
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
@decorator
def verbose(function, *args, **kwargs):
"""Improved verbose decorator to allow functions to override log-level
Do not call this directly to set global verbosity level, instead use
set_log_level().
Parameters
----------
function : function
Function to be decorated by setting the verbosity level.
Returns
-------
dec : function
The decorated function
"""
arg_names = inspect.getargspec(function).args
default_level = verbose_level = None
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
if 'verbose' in arg_names:
verbose_level = args[arg_names.index('verbose')]
elif 'verbose' in kwargs:
verbose_level = kwargs.pop('verbose')
# This ensures that object.method(verbose=None) will use object.verbose
verbose_level = default_level if verbose_level is None else verbose_level
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
return function(*args, **kwargs)
finally:
set_log_level(old_level)
return function(*args, **kwargs)
@nottest
def slow_test(f):
"""Decorator for slow tests"""
f.slow_test = True
return f
@nottest
def ultra_slow_test(f):
"""Decorator for ultra slow tests"""
f.ultra_slow_test = True
f.slow_test = True
return f
def has_nibabel(vox2ras_tkr=False):
"""Determine if nibabel is installed
Parameters
----------
vox2ras_tkr : bool
If True, require nibabel has vox2ras_tkr support.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel
out = True
if vox2ras_tkr: # we need MGHHeader to have vox2ras_tkr param
out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),
'header_class', 0),
'get_vox2ras_tkr', None) is not None)
return out
except ImportError:
return False
def has_mne_c():
"""Aux function"""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Aux function"""
return 'FREESURFER_HOME' in os.environ
def requires_nibabel(vox2ras_tkr=False):
"""Aux function"""
extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''
return np.testing.dec.skipif(not has_nibabel(vox2ras_tkr),
'Requires nibabel%s' % extra)
def requires_scipy_version(min_version):
"""Helper for testing"""
return np.testing.dec.skipif(not check_scipy_version(min_version),
'Requires scipy version >= %s' % min_version)
def requires_module(function, name, call):
"""Decorator to skip test if package is not available"""
try:
from nose.plugins.skip import SkipTest
except ImportError:
SkipTest = AssertionError
@wraps(function)
def dec(*args, **kwargs):
skip = False
try:
exec(call) in globals(), locals()
except Exception:
skip = True
if skip is True:
raise SkipTest('Test %s skipped, requires %s'
% (function.__name__, name))
return function(*args, **kwargs)
return dec
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_sklearn_call = """
required_version = '0.14'
import sklearn
version = LooseVersion(sklearn.__version__)
if version < required_version:
raise ImportError
"""
_mayavi_call = """
from mayavi import mlab
mlab.options.backend = 'test'
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
_fs_or_ni_call = """
if not has_nibabel() and not has_freesurfer():
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
requires_freesurfer = partial(requires_module, name='Freesurfer',
call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_fs_or_nibabel = partial(requires_module, name='nibabel or Freesurfer',
call=_fs_or_ni_call)
requires_tvtk = partial(requires_module, name='TVTK',
call='from tvtk.api import tvtk')
requires_statsmodels = partial(requires_module, name='statsmodels',
call='import statsmodels')
requires_patsy = partial(requires_module, name='patsy',
call='import patsy')
requires_pysurfer = partial(requires_module, name='PySurfer',
call='from surfer import Brain')
requires_PIL = partial(requires_module, name='PIL',
call='from PIL import Image')
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime',
call='import nitime')
requires_traits = partial(requires_module, name='traits',
call='import traits')
requires_h5py = partial(requires_module, name='h5py', call='import h5py')
def _check_mayavi_version(min_version='4.3.0'):
"""Raise a RuntimeError if the required version of mayavi is not available
Parameters
----------
min_version : str
The version string. Anything that matches
``'(\\d+ | [a-z]+ | \\.)'``
"""
import mayavi
require_mayavi = LooseVersion(min_version)
if LooseVersion(mayavi.__version__) < require_mayavi:
raise RuntimeError("Need mayavi >= %s" % require_mayavi)
def check_sklearn_version(min_version):
"""Check minimum sklearn version required
Parameters
----------
min_version : str
The version string. Anything that matches
``'(\\d+ | [a-z]+ | \\.)'``
"""
ok = True
try:
import sklearn
this_version = LooseVersion(sklearn.__version__)
if this_version < min_version:
ok = False
except ImportError:
ok = False
return ok
def check_scipy_version(min_version):
"""Check minimum sklearn version required
Parameters
----------
min_version : str
The version string. Anything that matches
``'(\\d+ | [a-z]+ | \\.)'``
"""
this_version = LooseVersion(scipy.__version__)
return False if this_version < min_version else True
@verbose
def run_subprocess(command, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
msg = ("Your PATH environment variable contains at least one path "
"starting with a tilde ('~') character. Such paths are not "
"interpreted correctly from within Python. It is recommended "
"that you use '$HOME' instead of '~'.")
warnings.warn(msg)
logger.info("Running subprocess: %s" % ' '.join(command))
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
logger.error('Command not found: %s' % (command[0],))
raise
stdout_, stderr = p.communicate()
stdout_ = '' if stdout_ is None else stdout_.decode('utf-8')
stderr = '' if stderr is None else stderr.decode('utf-8')
if stdout_.strip():
logger.info("stdout:\n%s" % stdout_)
if stderr.strip():
logger.info("stderr:\n%s" % stderr)
output = (stdout_, stderr)
if p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in inspect.getargspec(err_fun).args:
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
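# Hedged usage sketch (comment only): capturing the output of a trivial
# command; the command itself is platform dependent and purely illustrative.
#
#   >>> stdout, stderr = run_subprocess(['echo', 'hello'])  # doctest: +SKIP
#   >>> stdout.strip()  # doctest: +SKIP
#   'hello'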
###############################################################################
# LOGGING
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger = logging.getLogger('mne')
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARNING').
output_format : str
Format of the output messages. See the following for examples:
https://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
logger = logging.getLogger('mne')
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
###############################################################################
# CONFIG / PREFS
def get_subjects_dir(subjects_dir=None, raise_error=False):
"""Safely use subjects_dir input to return SUBJECTS_DIR
Parameters
----------
subjects_dir : str | None
If a value is provided, return subjects_dir. Otherwise, look for
SUBJECTS_DIR config and return the result.
raise_error : bool
If True, raise a KeyError if no value for SUBJECTS_DIR can be found
(instead of returning None).
Returns
-------
value : str | None
The SUBJECTS_DIR value.
"""
if subjects_dir is None:
subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
return subjects_dir
_temp_home_dir = None
def _get_extra_data_path(home_dir=None):
"""Get path to extra data (config, tables, etc.)"""
global _temp_home_dir
if home_dir is None:
# this has been checked on OSX64, Linux64, and Win32
if 'nt' == os.name.lower():
home_dir = os.getenv('APPDATA')
else:
# This is a more robust way of getting the user's home folder on
# Linux platforms (not sure about OSX, Unix or BSD) than checking
# the HOME environment variable. If the user is running some sort
# of script that isn't launched via the command line (e.g. a script
# launched via Upstart) then the HOME environment variable will
# not be set.
if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
if _temp_home_dir is None:
_temp_home_dir = tempfile.mkdtemp()
atexit.register(partial(shutil.rmtree, _temp_home_dir,
ignore_errors=True))
home_dir = _temp_home_dir
else:
home_dir = os.path.expanduser('~')
if home_dir is None:
raise ValueError('mne-python config file path could '
'not be determined, please report this '
'error to mne-python developers')
return op.join(home_dir, '.mne')
def get_config_path(home_dir=None):
"""Get path to standard mne-python config file
Parameters
----------
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
config_path : str
The path to the mne-python configuration file. On windows, this
will be '%APPDATA%\.mne\mne-python.json'. On every other
system, this will be ~/.mne/mne-python.json.
"""
val = op.join(_get_extra_data_path(home_dir=home_dir),
'mne-python.json')
return val
def set_cache_dir(cache_dir):
"""Set the directory to be used for temporary file storage.
This directory is used by joblib to store memmapped arrays,
which reduces memory requirements and speeds up parallel
computation.
Parameters
----------
cache_dir: str or None
Directory to use for temporary file storage. None disables
temporary file storage.
"""
if cache_dir is not None and not op.exists(cache_dir):
raise IOError('Directory %s does not exist' % cache_dir)
set_config('MNE_CACHE_DIR', cache_dir)
def set_memmap_min_size(memmap_min_size):
"""Set the minimum size for memmaping of arrays for parallel processing
Parameters
----------
memmap_min_size: str or None
Threshold on the minimum size of arrays that triggers automated memory
mapping for parallel processing, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
"""
if memmap_min_size is not None:
if not isinstance(memmap_min_size, string_types):
raise ValueError('\'memmap_min_size\' has to be a string.')
if memmap_min_size[-1] not in ['K', 'M', 'G']:
raise ValueError('The size has to be given in kilo-, mega-, or '
'gigabytes, e.g., 100K, 500M, 1G.')
set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size)
# List the known configuration values
known_config_types = [
'MNE_BROWSE_RAW_SIZE',
'MNE_CUDA_IGNORE_PRECISION',
'MNE_DATA',
'MNE_DATASETS_MEGSIM_PATH',
'MNE_DATASETS_SAMPLE_PATH',
'MNE_DATASETS_SOMATO_PATH',
'MNE_DATASETS_SPM_FACE_PATH',
'MNE_DATASETS_EEGBCI_PATH',
'MNE_DATASETS_BRAINSTORM_PATH',
'MNE_DATASETS_TESTING_PATH',
'MNE_LOGGING_LEVEL',
'MNE_USE_CUDA',
'SUBJECTS_DIR',
'MNE_CACHE_DIR',
'MNE_MEMMAP_MIN_SIZE',
'MNE_SKIP_TESTING_DATASET_TESTS',
'MNE_DATASETS_SPM_FACE_DATASETS_TESTS'
]
# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = [
'MNE_STIM_CHANNEL',
]
def get_config(key=None, default=None, raise_error=False, home_dir=None):
"""Read mne(-python) preference from env, then mne-python config
Parameters
----------
key : None | str
The preference key to look for. The os environment is searched first,
then the mne-python config file is parsed.
If None, all the config parameters present in the path are returned.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
Returns
-------
value : dict | str | None
The preference key value.
See Also
--------
set_config
"""
if key is not None and not isinstance(key, string_types):
raise TypeError('key must be a string')
# first, check to see if key is in env
if key is not None and key in os.environ:
return os.environ[key]
# second, look for it in mne-python config file
config_path = get_config_path(home_dir=home_dir)
if not op.isfile(config_path):
key_found = False
val = default
else:
with open(config_path, 'r') as fid:
config = json.load(fid)
if key is None:
return config
key_found = key in config
val = config.get(key, default)
if not key_found and raise_error is True:
meth_1 = 'os.environ["%s"] = VALUE' % key
meth_2 = 'mne.utils.set_config("%s", VALUE)' % key
raise KeyError('Key "%s" not found in environment or in the '
'mne-python config file: %s '
'Try either:'
' %s for a temporary solution, or:'
' %s for a permanent one. You can also '
'set the environment variable before '
'running python.'
% (key, config_path, meth_1, meth_2))
return val
def set_config(key, value, home_dir=None):
"""Set mne-python preference in config
Parameters
----------
key : str
The preference key to set.
value : str | None
The value to assign to the preference key. If None, the key is
deleted.
home_dir : str | None
The folder that contains the .mne config folder.
If None, it is found automatically.
See Also
--------
get_config
"""
if not isinstance(key, string_types):
raise TypeError('key must be a string')
# While JSON allow non-string types, we allow users to override config
# settings using env, which are strings, so we enforce that here
if not isinstance(value, string_types) and value is not None:
raise TypeError('value must be a string or None')
if key not in known_config_types and not \
any(k in key for k in known_config_wildcards):
warnings.warn('Setting non-standard config type: "%s"' % key)
# Read all previous values
config_path = get_config_path(home_dir=home_dir)
if op.isfile(config_path):
with open(config_path, 'r') as fid:
config = json.load(fid)
else:
config = dict()
logger.info('Attempting to create new mne-python configuration '
'file:\n%s' % config_path)
if value is None:
config.pop(key, None)
else:
config[key] = value
# Write all values. This may fail if the default directory is not
# writeable.
directory = op.dirname(config_path)
if not op.isdir(directory):
os.mkdir(directory)
with open(config_path, 'w') as fid:
json.dump(config, fid, sort_keys=True, indent=0)
class ProgressBar(object):
"""Class for generating a command-line progressbar
Parameters
----------
max_value : int
Maximum value of process (e.g. number of samples to process, bytes to
download, etc.).
initial_value : int
Initial value of process, useful when resuming process from a specific
value, defaults to 0.
mesg : str
Message to include at end of progress bar.
max_chars : int
Number of characters to use for progress bar (be sure to save some room
for the message and % complete as well).
progress_character : char
Character in the progress bar that indicates the portion completed.
spinner : bool
Show a spinner. Useful for long-running processes that may not
increment the progress bar very often. This provides the user with
feedback that the progress has not stalled.
Example
-------
>>> progress = ProgressBar(13000)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 |
>>> progress = ProgressBar(13000, spinner=True)
>>> progress.update(3000) # doctest: +SKIP
[......... ] 23.07692 |
>>> progress.update(6000) # doctest: +SKIP
[.................. ] 46.15385 /
"""
spinner_symbols = ['|', '/', '-', '\\']
template = '\r[{0}{1}] {2:.05f} {3} {4} '
def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,
progress_character='.', spinner=False, verbose_bool=True):
self.cur_value = initial_value
self.max_value = float(max_value)
self.mesg = mesg
self.max_chars = max_chars
self.progress_character = progress_character
self.spinner = spinner
self.spinner_index = 0
self.n_spinner = len(self.spinner_symbols)
self._do_print = verbose_bool
def update(self, cur_value, mesg=None):
"""Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
# Ensure floating-point division so we can get fractions of a percent
# for the progressbar.
self.cur_value = cur_value
progress = min(float(self.cur_value) / self.max_value, 1.)
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
# Update the message
if mesg is not None:
self.mesg = mesg
# The \r tells the cursor to return to the beginning of the line rather
# than starting a new line. This allows us to have a progressbar-style
# display in the console window.
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
# Force a flush because sometimes when using bash scripts and pipes,
# the output is not printed until after the program exits.
if self._do_print:
sys.stdout.write(bar)
sys.stdout.flush()
# Increment the spinner
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
def update_with_increment_value(self, increment_value, mesg=None):
"""Update progressbar with the value of the increment instead of the
current value of process as in update()
Parameters
----------
increment_value : int
Value of the increment of process. The percent of the progressbar
will be computed as
((self.cur_value + increment_value) / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''.
"""
self.cur_value += increment_value
self.update(self.cur_value, mesg)
def _chunk_read(response, local_file, initial_size=0, verbose_bool=True):
"""Download a file chunk by chunk and show advancement
Can also be used when resuming downloads over http.
Parameters
----------
response: urllib.response.addinfourl
Response to the download request in order to get file size.
local_file: file
Hard disk file where data should be written.
initial_size: int, optional
If resuming, indicate the initial size of the file.
Notes
-----
The chunk size will be automatically adapted based on the connection
speed.
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
# Returns only amount left to download when resuming, not the size of the
# entire file
total_size = int(response.headers.get('Content-Length', '1').strip())
total_size += initial_size
progress = ProgressBar(total_size, initial_value=initial_size,
max_chars=40, spinner=True, mesg='downloading',
verbose_bool=verbose_bool)
chunk_size = 8192 # 2 ** 13
while True:
t0 = time.time()
chunk = response.read(chunk_size)
dt = time.time() - t0
if dt < 0.001:
chunk_size *= 2
elif dt > 0.5 and chunk_size > 8192:
chunk_size = chunk_size // 2
if not chunk:
if verbose_bool:
sys.stdout.write('\n')
sys.stdout.flush()
break
_chunk_write(chunk, local_file, progress)
def _chunk_read_ftp_resume(url, temp_file_name, local_file, verbose_bool=True):
"""Resume downloading of a file from an FTP server"""
# Adapted from: https://pypi.python.org/pypi/fileDownloader.py
# but with changes
parsed_url = urllib.parse.urlparse(url)
file_name = os.path.basename(parsed_url.path)
server_path = parsed_url.path.replace(file_name, "")
unquoted_server_path = urllib.parse.unquote(server_path)
local_file_size = os.path.getsize(temp_file_name)
data = ftplib.FTP()
if parsed_url.port is not None:
data.connect(parsed_url.hostname, parsed_url.port)
else:
data.connect(parsed_url.hostname)
data.login()
if len(server_path) > 1:
data.cwd(unquoted_server_path)
data.sendcmd("TYPE I")
data.sendcmd("REST " + str(local_file_size))
down_cmd = "RETR " + file_name
file_size = data.size(file_name)
progress = ProgressBar(file_size, initial_value=local_file_size,
max_chars=40, spinner=True, mesg='downloading',
verbose_bool=verbose_bool)
# Callback lambda function that will be passed the downloaded data
# chunk and will write it to file and update the progress bar
def chunk_write(chunk):
return _chunk_write(chunk, local_file, progress)
data.retrbinary(down_cmd, chunk_write)
data.close()
sys.stdout.write('\n')
sys.stdout.flush()
def _chunk_write(chunk, local_file, progress):
"""Write a chunk to file and update the progress bar"""
local_file.write(chunk)
progress.update_with_increment_value(len(chunk))
@verbose
def _fetch_file(url, file_name, print_destination=True, resume=True,
hash_=None, verbose=None):
"""Load requested file, downloading it if needed or requested
Parameters
----------
url: string
The url of file to be downloaded.
file_name: string
Name, along with the path, of where downloaded file will be saved.
print_destination: bool, optional
If true, destination of where file was saved will be printed after
download finishes.
resume: bool, optional
If true, try to resume partially downloaded files.
hash_ : str | None
The hash of the file to check. If None, no checking is
performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
# Adapted from NISL:
# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
if hash_ is not None and (not isinstance(hash_, string_types) or
len(hash_) != 32):
raise ValueError('Bad hash value given, should be a 32-character '
'string:\n%s' % (hash_,))
temp_file_name = file_name + ".part"
local_file = None
initial_size = 0
verbose_bool = (logger.level <= 20) # 20 is info
try:
# Checking file size and displaying it alongside the download url
u = urllib.request.urlopen(url, timeout=10.)
try:
file_size = int(u.headers.get('Content-Length', '1').strip())
finally:
u.close()
del u
logger.info('Downloading data from %s (%s)\n'
% (url, sizeof_fmt(file_size)))
# Downloading data
if resume and os.path.exists(temp_file_name):
local_file = open(temp_file_name, "ab")
# Resuming HTTP and FTP downloads requires different procedures
scheme = urllib.parse.urlparse(url).scheme
if scheme in ('http', 'https'):
local_file_size = os.path.getsize(temp_file_name)
# If the file exists, then only download the remainder
req = urllib.request.Request(url)
req.headers["Range"] = "bytes=%s-" % local_file_size
try:
data = urllib.request.urlopen(req)
except Exception:
# There is a problem that may be due to resuming, some
# servers may not support the "Range" header. Switch back
# to complete download method
logger.info('Resuming download failed. Attempting to '
'restart downloading the entire file.')
local_file.close()
_fetch_file(url, file_name, resume=False)
else:
_chunk_read(data, local_file, initial_size=local_file_size,
verbose_bool=verbose_bool)
data.close()
del data # should auto-close
else:
_chunk_read_ftp_resume(url, temp_file_name, local_file,
verbose_bool=verbose_bool)
else:
local_file = open(temp_file_name, "wb")
data = urllib.request.urlopen(url)
try:
_chunk_read(data, local_file, initial_size=initial_size,
verbose_bool=verbose_bool)
finally:
data.close()
del data # should auto-close
# temp file must be closed prior to the move
if not local_file.closed:
local_file.close()
# check md5sum
if hash_ is not None:
logger.info('Verifying download hash.')
md5 = md5sum(temp_file_name)
if hash_ != md5:
raise RuntimeError('Hash mismatch for downloaded file %s, '
'expected %s but got %s'
% (temp_file_name, hash_, md5))
shutil.move(temp_file_name, file_name)
if print_destination is True:
logger.info('File saved as %s.\n' % file_name)
except Exception as e:
logger.error('Error while fetching file %s.'
' Dataset fetching aborted.' % url)
logger.error("Error: %s", e)
raise
finally:
if local_file is not None:
if not local_file.closed:
local_file.close()
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str"""
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
"""Human friendly file size"""
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
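# Hedged usage sketch (comment only): the number of decimals grows with the
# unit, e.g. three million bytes are reported in MB with one decimal.
#
#   >>> sizeof_fmt(3000000)
#   '2.9 MB'
#   >>> sizeof_fmt(1)
#   '1 byte'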
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)"""
destination = urllib.parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path,
urllib.request.url2pathname(destination)[1:])
return destination
def _get_stim_channel(stim_channel, info):
"""Helper to determine the appropriate stim_channel
First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
are read. If these are not found, it will fall back to 'STI 014' if
present, then fall back to the first channel of type 'stim', if present.
Parameters
----------
stim_channel : str | list of str | None
The stim channel selected by the user.
info : instance of Info
An information structure containing information about the channels.
Returns
-------
stim_channel : str | list of str
The name of the stim channel(s) to use
"""
if stim_channel is not None:
if not isinstance(stim_channel, list):
if not isinstance(stim_channel, string_types):
raise TypeError('stim_channel must be a str, list, or None')
stim_channel = [stim_channel]
if not all(isinstance(s, string_types) for s in stim_channel):
raise TypeError('stim_channel list must contain all strings')
return stim_channel
stim_channel = list()
ch_count = 0
ch = get_config('MNE_STIM_CHANNEL')
while(ch is not None and ch in info['ch_names']):
stim_channel.append(ch)
ch_count += 1
ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)
if ch_count > 0:
return stim_channel
if 'STI 014' in info['ch_names']:
return ['STI 014']
from .io.pick import pick_types
stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)
if len(stim_channel) > 0:
stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]
return stim_channel
raise ValueError("No stim channels found. Consider specifying them "
"manually using the 'stim_channel' parameter.")
def _check_fname(fname, overwrite):
"""Helper to check for file existence"""
if not isinstance(fname, string_types):
raise TypeError('file name is not a string')
if op.isfile(fname):
if not overwrite:
raise IOError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
else:
logger.info('Overwriting existing file.')
def _check_subject(class_subject, input_subject, raise_error=True):
"""Helper to get subject name from class"""
if input_subject is not None:
if not isinstance(input_subject, string_types):
raise ValueError('subject input must be a string')
else:
return input_subject
elif class_subject is not None:
if not isinstance(class_subject, string_types):
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
else:
return class_subject
else:
if raise_error is True:
raise ValueError('Neither subject input nor class subject '
'attribute was a string')
return None
def _check_pandas_installed():
"""Aux function"""
try:
import pandas as pd
return pd
except ImportError:
raise RuntimeError('For this method to work the Pandas library is'
' required.')
def _check_pandas_index_arguments(index, defaults):
""" Helper function to check pandas index arguments """
if not any(isinstance(index, k) for k in (list, tuple)):
index = [index]
invalid_choices = [e for e in index if e not in defaults]
if invalid_choices:
options = [', '.join(e) for e in [invalid_choices, defaults]]
raise ValueError('[%s] is not a valid option. Valid index '
'values are \'None\' or %s' % tuple(options))
def _clean_names(names, remove_whitespace=False, before_dash=True):
""" Remove white-space on topo matching
This function handles different naming
conventions for old VS new VectorView systems (`remove_whitespace`).
Also it allows to remove system specific parts in CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_virtual'):
name = name[:-8]
cleaned.append(name)
return cleaned
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
bad_names = ['MovedModule'] # this is in six.py, and causes bad things
for mod in list(sys.modules.values()):
if mod.__class__.__name__ not in bad_names and hasattr(mod, reg):
getattr(mod, reg).clear()
# hack to deal with old scipy/numpy in tests
if os.getenv('TRAVIS') == 'true' and sys.version.startswith('2.6'):
warnings.simplefilter('default')
try:
np.rank([])
except Exception:
pass
warnings.simplefilter('always')
def _check_type_picks(picks):
"""helper to guarantee type integrity of picks"""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
if not all(isinstance(i, int) for i in picks):
raise ValueError(err_msg)
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise ValueError(err_msg)
else:
raise ValueError(err_msg)
return picks
@nottest
def run_tests_if_main(measure_mem=False):
"""Run tests in a given file if it is run as a script"""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
with warnings.catch_warnings(record=True): # memory_usage internal dep.
mem = int(round(max(memory_usage(-1)))) if measure_mem else -1
if mem >= 0:
print('Memory consumption after import: %s' % mem)
t0 = time.time()
peak_mem, peak_name = mem, 'import'
max_elapsed, elapsed_name = 0, 'N/A'
count = 0
for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):
val = local_vars[name]
if name.startswith('_'):
continue
elif callable(val) and name.startswith('test'):
count += 1
doc = val.__doc__.strip() if val.__doc__ else name
sys.stdout.write('%s ... ' % doc)
sys.stdout.flush()
try:
t1 = time.time()
if measure_mem:
with warnings.catch_warnings(record=True): # dep warn
mem = int(round(max(memory_usage((val, (), {})))))
else:
val()
mem = -1
if mem >= peak_mem:
peak_mem, peak_name = mem, name
mem = (', mem: %s MB' % mem) if mem >= 0 else ''
elapsed = int(round(time.time() - t1))
if elapsed >= max_elapsed:
max_elapsed, elapsed_name = elapsed, name
sys.stdout.write('time: %s sec%s\n' % (elapsed, mem))
sys.stdout.flush()
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
sys.stdout.write('SKIP (%s)\n' % str(err))
sys.stdout.flush()
else:
raise
elapsed = int(round(time.time() - t0))
sys.stdout.write('Total: %s tests\n• %s sec (%s sec for %s)\n• Peak memory'
' %s MB (%s)\n' % (count, elapsed, max_elapsed,
elapsed_name, peak_mem, peak_name))
class ArgvSetter(object):
"""Temporarily set sys.argv"""
def __init__(self, args=(), disable_stdout=True, disable_stderr=True):
self.argv = list(('python',) + args)
self.stdout = StringIO() if disable_stdout else sys.stdout
self.stderr = StringIO() if disable_stderr else sys.stderr
def __enter__(self):
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args):
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
def md5sum(fname, block_size=1048576): # 2 ** 20
"""Calculate the md5sum for a file
Parameters
----------
fname : str
Filename.
block_size : int
Block size to use when reading.
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def _sphere_to_cartesian(theta, phi, r):
"""Transform spherical coordinates to cartesian"""
z = r * np.sin(phi)
rcos_phi = r * np.cos(phi)
x = rcos_phi * np.cos(theta)
y = rcos_phi * np.sin(theta)
return x, y, z
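# Hedged usage sketch (comment only): theta is the azimuth and phi the
# elevation, so (theta=0, phi=0, r=1) lands on the positive x axis.
#
#   >>> _sphere_to_cartesian(0., 0., 1.)
#   (1.0, 0.0, 0.0)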
def create_slices(start, stop, step=None, length=1):
""" Generate slices of time indexes
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
length : int
Number of time sample included in a given slice.
step: int | None
Number of time samples separating two slices.
If step = None, step = length.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
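# A short usage sketch (illustrative):
# >>> create_slices(0, 10, length=5)           # contiguous, non-overlapping windows
# [slice(0, 5, 1), slice(5, 10, 1)]
# >>> create_slices(0, 10, step=3, length=5)   # overlapping windows
# [slice(0, 5, 1), slice(3, 8, 1)]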
def _time_mask(times, tmin=None, tmax=None, strict=False):
"""Helper to safely find sample boundaries"""
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
mask = (times >= tmin)
mask &= (times <= tmax)
if not strict:
mask |= isclose(times, tmin)
mask |= isclose(times, tmax)
return mask
def _get_fast_dot():
"""Helper to get fast dot"""
try:
from sklearn.utils.extmath import fast_dot
except ImportError:
fast_dot = np.dot
return fast_dot
def random_permutation(n_samples, random_state=None):
"""Helper to emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
as the matlab randperm function whenever the random_state is the same
as the matlab random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) + 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
random_state : int | None
Random seed for initializing the pseudo-random number generator.
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n_samples-1.
"""
rng = check_random_state(random_state)
idx = rng.rand(n_samples)
randperm = np.argsort(idx)
return randperm
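# A short usage sketch (illustrative):
# >>> perm = random_permutation(5, random_state=0)
# >>> sorted(perm.tolist())          # a permutation of 0..4
# [0, 1, 2, 3, 4]
# Per the docstring above, perm + 1 is what matlab's randperm(5) would return
# for the same random seed.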
|
bsd-3-clause
|
siou83/trading-with-python
|
lib/csvDatabase.py
|
77
|
6045
|
# -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
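# Expected file layout for loadCsv (illustrative sample, values are made up):
#   ,open,high,low,close
#   20120831 09:30:00,12.1,12.3,12.0,12.2
#   20120831 09:30:30,12.2,12.4,12.1,12.3
# The first column holds the timestamp in dateTimeFormat; the remaining columns
# are parsed as floats and become the DataFrame columns.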
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
print 'Creating data directory ', self.dbDir
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
if lowerCaseColumns: # this should provide consistency to column names. All lowercase
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
print 'Saving data to: ', dest
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
''' load multiple dates, concatenating to one DataFrame '''
tmp =[]
print 'Loading multiple dates for ' , self.symbol
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
print ''
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
print 'Processing', date
try:
df = self.loadDate(date)
ohlc.set_value(date,'open',df['open'][0])
ohlc.set_value(date,'high',df['wap'].max())
ohlc.set_value(date,'low', df['wap'].min())
ohlc.set_value(date,'close',df['close'][-1])
except Exception as e:
print 'Could not convert:', e
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
self.csv = {} # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as wide panel
provide a dates list. If no dates list is provided, common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
for k,v in self.csv.iteritems():
tmp[k] = v.loadDates(dates)
return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
''' write wide panel data to a hdfstore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
for k,v in self.csv.iteritems():
s+= (str(v)+'\n')
return s
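# A short usage sketch (illustrative; the paths below are hypothetical):
# db = HistDatabase('D:/data/30sec')
# wp = db.loadDates() # wide panel over the dates common to all symbols
# db.toHDF('D:/data/30sec.h5') # write the same panel to an HDF5 store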
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail()
|
bsd-3-clause
|
mehdidc/scikit-learn
|
sklearn/tests/test_qda.py
|
11
|
3492
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
"""
QDA classification.
This checks that QDA implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
|
bsd-3-clause
|
nss350/magPy
|
inbuilt/projectViewTF.py
|
1
|
9187
|
#!/usr/bin/python
# imports
import sys
import os
sys.path.append(os.path.join('pythonMT_dev', 'core'))
sys.path.append(os.path.join('pythonMT_dev', 'stats'))
sys.path.append(os.path.join('pythonMT_dev', 'utils'))
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# import my classes
from project import Project
from transferFunctionReader import TransferFunctionReader
# utilities
from utilsPlotter import *
from utilsIO import generalPrint
from utilsPlotter import getPlotFonts
def projectViewTF(proj, **kwargs):
generalPrint("ProjectViewTF", "Showing project transfer functions with options: {}".format(kwargs))
# get options
options = parseKeywords(getDefaultOptions(proj), kwargs)
# loop over sites
for s in options["sites"]:
# now plot TF for this site
if options["oneplot"]:
fig = projectPlotSingle(proj, s, **kwargs)
else:
fig = projectPlotMulti(proj, s, **kwargs)
if options["show"]:
plt.show()
# plot the polarisations on the same plot
def projectPlotSingle(proj, site, **kwargs):
# get options
options = parseKeywords(getDefaultOptions(proj), kwargs)
# print site info
proj.printSiteInfo(site)
# get the sample frequencies for the site
sampleFreqs = set(proj.getSiteSampleFreqs(site))
# find the intersection with the options["freqs"]
sampleFreqs = sampleFreqs.intersection(options["freqs"])
# after the intersection, make this a sorted list again
sampleFreqs = sorted(list(sampleFreqs))
# now loop over the prepend options
# if prepend is a string, then make it a list
if isinstance(options["prepend"], basestring):
options["prepend"] = [options["prepend"]]
plotFonts = options["plotfonts"]
for prepend in options["prepend"]:
# create figure
fig = plt.figure(figsize=options["figsize"])
# sup title
st = fig.suptitle("Site {}: {}: fs = {}".format(site, prepend, arrayToStringInt(sampleFreqs)), fontsize=plotFonts["suptitle"])
st.set_y(0.98)
# plot
mks = ["o", "*", "d", "^", "h"]
colours = {"ExHx":"orange", "EyHy":"green", "ExHy":"red", "EyHx":"blue"}
# some plot options
nrows = 2 # amplitude and phase
ncols = 1
dataFound = 0
# loop over sampling frequencies
for fs, mk in zip(sampleFreqs, mks):
path = os.path.join(proj.getTransDataPathSite(site), "{:d}".format(int(fs)), "{}_fs{:d}_{}".format(site, int(fs), prepend))
# check path - if does not exist, continue
if not checkFilepath(path):
continue
dataFound += 1 # increment by 1 if something found
tfReader = TransferFunctionReader(path)
periods = tfReader.getPeriods()
res = {}
phase = {}
for pol in options["polarisations"]:
res[pol], phase[pol] = tfReader.getResAndPhase(pol)
# can do this in a loop
for idx, pol in enumerate(options["polarisations"]):
# amplitude
plt.subplot(nrows, ncols, 1)
plt.loglog(periods, res[pol], marker=mks[idx], markersize=9, markerfacecolor='none', markeredgecolor=colours[pol], mew=1.5,
c=colours[pol], ls="dashed", label="{} - {}".format(fs, pol))
# phase
plt.subplot(nrows, ncols, 2)
plt.semilogx(periods, phase[pol], marker=mks[idx], markersize=9, markerfacecolor='none', markeredgecolor=colours[pol], mew=1.5,
c=colours[pol], ls="dashed", label="{} - {}".format(fs, pol))
# check if any files found
if dataFound == 0:
continue
# now put the titles and such on
# amplitude
ax = plt.subplot(2, 1, 1)
plt.ylim(options["res_ylim"])
plt.xlim(options["res_xlim"])
plt.grid(True)
plt.title("Apparent Resistivity", fontsize=plotFonts["title"])
plt.xlabel("Period [s]", fontsize=plotFonts["axisLabel"])
plt.ylabel("Apparent Resistivity [Ohm m]", fontsize=plotFonts["axisLabel"])
plt.legend(loc="best", fontsize=plotFonts["legend"], framealpha=0.5)
# make square
ax.set_aspect('equal', adjustable='box')
# set tick sizes
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(plotFonts["axisTicks"])
# phase
ax = plt.subplot(2, 1, 2)
plt.ylim(options["phase_ylim"])
plt.xlim(options["phase_xlim"])
plt.grid(True)
plt.title("Phase", fontsize=plotFonts["title"])
plt.xlabel("Period [s]", fontsize=plotFonts["axisLabel"])
plt.ylabel("Phase [degrees]", fontsize=plotFonts["axisLabel"])
plt.legend(loc="best", fontsize=plotFonts["legend"], framealpha=0.5)
# set tick sizes
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(plotFonts["axisTicks"])
fig.tight_layout()
# shift subplots down, make room for suptitle
fig.subplots_adjust(top=0.92)
if options["save"]:
fig.savefig(os.path.join(proj.getTransDataPathSite(site), "{}_{}.png".format(site, prepend)))
if not options["show"]:
plt.close("all")
return fig
# plot the polarisations on multiple plots
def projectPlotMulti(proj, site, **kwargs):
# get options
options = parseKeywords(getDefaultOptions(proj), kwargs)
# print site info
proj.printSiteInfo(site)
# get the sample frequencies for the site
sampleFreqs = set(proj.getSiteSampleFreqs(site))
# find the intersection with the options["freqs"]
sampleFreqs = sampleFreqs.intersection(options["freqs"])
# after the intersection, make this a sorted list again
sampleFreqs = sorted(list(sampleFreqs))
# now loop over the prepend options
# if prepend is a string, then make it a list
if isinstance(options["prepend"], basestring):
options["prepend"] = [options["prepend"]]
plotFonts = options["plotfonts"]
for prepend in options["prepend"]:
# create figure
fig = plt.figure(figsize=options["figsize"])
# sup title
st = fig.suptitle("Site {}: {}: fs = {}".format(site, prepend, arrayToStringInt(sampleFreqs)), fontsize=plotFonts["suptitle"])
st.set_y(0.98)
# plot
mks = ["o", "*", "d", "^", "h"]
# some plot info
nrows = 2 # amplitude and phase
ncols = len(options["polarisations"])
dataFound = 0
# loop over the sampling frequencies and plot
for fs, mk in zip(sampleFreqs, mks):
path = os.path.join(proj.getTransDataPathSite(site), "{:d}".format(int(fs)), "{}_fs{:d}_{}".format(site, int(fs), prepend))
# check path - if does not exist, continue
if not checkFilepath(path):
continue
dataFound += 1 # increment by 1 if something found
# plot the data
tfReader = TransferFunctionReader(path)
periods = tfReader.getPeriods()
res = {}
phase = {}
for pol in options["polarisations"]:
res[pol], phase[pol] = tfReader.getResAndPhase(pol)
# can do this in a loop
for idx, pol in enumerate(options["polarisations"]):
# amplitude
plt.subplot(nrows, ncols, idx + 1)
plt.loglog(periods, res[pol], marker=mk, markersize=8, ls="dashed", label="{} - {}".format(fs, pol))
# phase
plt.subplot(nrows, ncols, ncols + idx + 1)
plt.semilogx(periods, phase[pol], marker=mk, markersize=8, ls="dashed", label="{} - {}".format(fs, pol))
# check if any files found
if dataFound == 0:
continue
# now put the titles and such on
for idx, pol in enumerate(options["polarisations"]):
# amplitude
ax = plt.subplot(nrows, ncols, idx + 1)
plt.ylim(options["res_ylim"])
plt.xlim(options["res_xlim"])
plt.grid(True)
plt.title("{} Resistivity".format(pol), fontsize=plotFonts["title"])
plt.xlabel("Period [s]", fontsize=plotFonts["axisLabel"])
plt.ylabel("Apparent Resistivity [Ohm m]", fontsize=plotFonts["axisLabel"])
plt.tick_params(axis='both', which='major', length=8, width=0.8, labelsize=plotFonts["axisTicks"])
plt.legend(loc="lower right", fontsize=plotFonts["legend"], framealpha=0.5)
# phase
ax = plt.subplot(nrows, ncols, ncols + idx + 1)
plt.ylim(options["phase_ylim"])
plt.xlim(options["phase_xlim"])
plt.grid(True)
plt.title("{} Phase".format(pol), fontsize=plotFonts["title"])
plt.xlabel("Period [s]", fontsize=plotFonts["axisLabel"])
plt.ylabel("Phase [degrees]", fontsize=plotFonts["axisLabel"])
plt.tick_params(axis='both', which='major', length=8, width=0.8, labelsize=plotFonts["axisTicks"])
plt.legend(loc="lower right", fontsize=plotFonts["legend"], framealpha=0.5)
fig.tight_layout()
# shift subplots down, make room for suptitle
fig.subplots_adjust(top=0.92)
if options["save"]:
fig.savefig(os.path.join(proj.getTransDataPathSite(site), "{}_{}.png".format(site, prepend)))
# return fig, which deals with showing
if not options["show"]:
plt.close("all")
return fig
def getDefaultOptions(proj):
# default options
default = {}
default["sites"] = proj.getAllSites()
default["freqs"] = proj.getAllSampleFreq()
default["polarisations"] = ["ExHx", "ExHy", "EyHx", "EyHy"]
default["save"] = False
default["show"] = True
default["figsize"] = (8, 14)
default["prepend"] = "transFunc"
default["oneplot"] = True
default["res_ylim"] = [0.000000001, 100000]
default["res_xlim"] = [0.00001, 10000]
default["phase_ylim"] = [-20,360]
default["phase_xlim"] = [0.00001, 10000]
default["plotfonts"] = getPlotFonts()
return default
def parseKeywords(default, keywords):
# parse the user supplied keywords
for w in default:
if w in keywords:
default[w] = keywords[w]
return default
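# A short usage sketch (illustrative): user keywords override the defaults and
# unrecognised keywords are silently ignored.
# opts = parseKeywords({"save": False, "show": True}, {"save": True, "dpi": 300})
# # -> {"save": True, "show": True}; "dpi" is dropped because it has no default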
|
apache-2.0
|
xwolf12/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
244
|
1593
|
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/sklearn/mixture/tests/test_gmm.py
|
11
|
20915
|
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.log_multivariate_normal_density) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
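# Illustrative sketch (not part of the original test): the two update formulas
# quoted in the docstring above agree in exact arithmetic. With weights summing
# to one:
# >>> rng_ = np.random.RandomState(1)
# >>> x = rng_.randn(1000, 2); w = np.full(1000, 1e-3)
# >>> mu_ = w.dot(x)
# >>> c_sub = (w[:, None] * x).T.dot(x) - np.outer(mu_, mu_)   # subtraction form
# >>> c_dir = (w[:, None] * (x - mu_)).T.dot(x - mu_)          # direct form
# >>> np.allclose(c_sub, c_dir)
# True
# In ill-conditioned cases, round-off in the subtraction form is what can yield
# a covariance matrix that is no longer positive definite.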
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
|
mit
|
pypot/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
2903
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed-form formula to compute
the asymptotically optimal shrinkage parameter (minimizing an MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
ViralLeadership/numpy
|
numpy/lib/twodim_base.py
|
83
|
26903
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
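# A short usage sketch (illustrative):
# >>> _min_int(0, 100) is int8        # int8 covers [-128, 127]
# True
# >>> _min_int(0, 100000) is int32    # too large for int8/int16
# True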
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
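# Illustrative sketch (not part of NumPy): a minimal usage example for
# tril_indices_from, whose docstring above has no Examples section; it
# simply forwards to tril_indices with the array's shape.
def _demo_tril_indices_from():
    import numpy as np
    a = np.arange(16).reshape(4, 4)
    il = np.tril_indices_from(a)  # equivalent to np.tril_indices(4)
    return a[il]  # array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])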
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
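# Illustrative sketch (not part of NumPy): a minimal usage example for
# triu_indices_from, mirroring the tril example above; k=1 selects the
# strictly upper triangle.
def _demo_triu_indices_from():
    import numpy as np
    a = np.arange(16).reshape(4, 4)
    iu = np.triu_indices_from(a, k=1)
    return a[iu]  # array([ 1,  2,  3,  6,  7, 11])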
|
bsd-3-clause
|
linebp/pandas
|
pandas/io/parsers.py
|
1
|
119336
|
"""
Module contains tools for processing files into DataFrames or other objects
"""
from __future__ import print_function
from collections import defaultdict
import re
import csv
import sys
import warnings
import datetime
from textwrap import fill
import numpy as np
from pandas import compat
from pandas.compat import (range, lrange, PY3, StringIO, lzip,
zip, string_types, map, u)
from pandas.core.dtypes.common import (
is_integer, _ensure_object,
is_list_like, is_integer_dtype,
is_float, is_dtype_equal,
is_object_dtype, is_string_dtype,
is_scalar, is_categorical_dtype)
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.index import Index, MultiIndex, RangeIndex
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.categorical import Categorical
from pandas.core import algorithms
from pandas.core.common import AbstractMethodError
from pandas.io.date_converters import generic_parser
from pandas.errors import ParserWarning, ParserError, EmptyDataError
from pandas.io.common import (get_filepath_or_buffer, is_file_like,
_validate_header_arg, _get_handle,
UnicodeReader, UTF8Recoder, _NA_VALUES,
BaseIterator, _infer_compression)
from pandas.core.tools import datetimes as tools
from pandas.util._decorators import Appender
import pandas._libs.lib as lib
import pandas._libs.parsers as parsers
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = u('\ufeff')
_parser_params = """Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, pathlib.Path, py._path.local.LocalPath or any \
object with a read() method (such as a file handle or StringIO)
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file could
be file://localhost/path/to/table.csv
%s
delimiter : str, default ``None``
Alternative argument name for sep.
delim_whitespace : boolean, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
.. versionadded:: 0.18.1 support for the Python parser.
header : int or list of ints, default 'infer'
Row number(s) to use as the column names, and the start of the data.
Default behavior is as if set to 0 if no ``names`` passed, otherwise
``None``. Explicitly pass ``header=0`` to be able to replace existing
names. The header can be a list of integers that specify row locations for
a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not
specified will be skipped (e.g. 2 in this example is skipped). Note that
this parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so header=0 denotes the first line of data
rather than the first line of the file.
names : array-like, default None
List of column names to use. If file contains no header row, then you
should explicitly pass header=None. Duplicates in this list are not
allowed unless mangle_dupe_cols=True, which is the default.
index_col : int or sequence or False, default None
Column to use as the row labels of the DataFrame. If a sequence is given, a
MultiIndex is used. If you have a malformed file with delimiters at the end
of each line, you might consider index_col=False to force pandas to _not_
use the first column as the index (row names)
usecols : array-like or callable, default None
Return a subset of the columns. If array-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid array-like
`usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz'].
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
as_recarray : boolean, default False
DEPRECATED: this argument will be removed in a future version. Please call
`pd.read_csv(...).to_records()` instead.
Return a NumPy recarray instead of a DataFrame after parsing the data.
If set to True, this option takes precedence over the `squeeze` parameter.
In addition, as row indices are not available in such a format, the
`index_col` parameter will be ignored.
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
prefix : str, default None
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X', 'X.1', ..., 'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `str` or `object` to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
%s
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels
true_values : list, default None
Values to consider as True
false_values : list, default None
Values to consider as False
skipinitialspace : boolean, default False
Skip spaces after delimiter.
skiprows : list-like or integer or callable, default None
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c')
skip_footer : int, default 0
DEPRECATED: use the `skipfooter` parameter instead, as they are identical
nrows : int, default None
Number of rows of file to read. Useful for reading pieces of large files
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '""" + fill("', '".join(sorted(_NA_VALUES)),
70, subsequent_indent=" ") + """'`.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
na_filter : boolean, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
skip_blank_lines : boolean, default True
If True, skip over blank lines rather than interpreting as NaN values
parse_dates : boolean or list of ints or names or list of lists or dict, \
default False
* boolean. If True -> try parsing the index.
* list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result
'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : boolean, default False
If True and parse_dates is enabled, pandas will attempt to infer the format
of the datetime strings in the columns, and if it can be inferred, switch
to a faster method of parsing them. In some cases this can increase the
parsing speed by 5-10x.
keep_date_col : boolean, default False
If True and parse_dates specifies combining multiple columns then
keep the original columns.
date_parser : function, default None
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call date_parser in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by parse_dates) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by parse_dates into a single array
and pass that; and 3) call date_parser once for each row using one or more
strings (corresponding to the columns defined by parse_dates) as arguments.
dayfirst : boolean, default False
DD/MM format dates, international and European format
iterator : boolean, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, default None
Return TextFileReader object for iteration.
See the `IO Tools docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use gzip,
bz2, zip or xz if filepath_or_buffer is a string ending in '.gz', '.bz2',
'.zip', or '.xz', respectively, and no decompression otherwise. If using
'zip', the ZIP file must contain only one data file to be read in.
Set to None for no decompression.
.. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.
thousands : str, default None
Thousands separator
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are `None` for the ordinary converter,
`high` for the high-precision converter, and `round_trip` for the
round-trip converter.
lineterminator : str (length 1), default None
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : boolean, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), default None
One-character string used to escape delimiter when quoting is QUOTE_NONE.
comment : str, default None
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if comment='#', parsing '#empty\\na,b,c\\n1,2,3'
with `header=0` will result in 'a,b,c' being
treated as the header.
encoding : str, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_
dialect : str or csv.Dialect instance, default None
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
tupleize_cols : boolean, default False
Leave a list of tuples on columns as is (default is to convert to
a Multi Index on the columns)
error_bad_lines : boolean, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
warn_bad_lines : boolean, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
low_memory : boolean, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser)
buffer_lines : int, default None
DEPRECATED: this argument will be removed in a future version because its
value is not respected by the parser
compact_ints : boolean, default False
DEPRECATED: this argument will be removed in a future version
If compact_ints is True, then for any column that is of integer dtype,
the parser will attempt to cast it as the smallest integer dtype possible,
either signed or unsigned depending on the specification from the
`use_unsigned` parameter.
use_unsigned : boolean, default False
DEPRECATED: this argument will be removed in a future version
If integer columns are being compacted (i.e. `compact_ints=True`), specify
whether the column should be compacted to the smallest signed or unsigned
integer dtype.
memory_map : boolean, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
Returns
-------
result : DataFrame or TextParser
"""
# engine is not used in read_fwf() so is factored out of the shared docstring
_engine_doc = """engine : {'c', 'python'}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete."""
_sep_doc = r"""sep : str, default {default}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used automatically. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``"""
_read_csv_doc = """
Read CSV (comma-separated) file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="','"), _engine_doc))
_read_table_doc = """
Read general delimited file into DataFrame
%s
""" % (_parser_params % (_sep_doc.format(default="\\t (tab-stop)"),
_engine_doc))
_fwf_widths = """\
colspecs : list of pairs (int, int) or 'infer', optional
A list of pairs (tuples) giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to) ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of ints, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
"""
_read_fwf_doc = """
Read a table of fixed-width formatted lines into DataFrame
%s
Also, 'delimiter' is used to specify the filler character of the
fields if it is not spaces (e.g., '~').
""" % (_parser_params % (_fwf_widths, ''))
def _validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
min_val=min_val)
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
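# Illustrative sketch (not part of pandas): shows how _validate_integer
# above accepts ints and safely-castable floats but rejects floats that
# would lose precision.
def _demo_validate_integer():
    assert _validate_integer('nrows', 5.0) == 5            # safe float cast
    assert _validate_integer('chunksize', 3, min_val=1) == 3
    try:
        _validate_integer('nrows', 5.5)                    # would lose precision
    except ValueError:
        pass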
def _read(filepath_or_buffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
if encoding is not None:
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
compression = kwds.get('compression')
compression = _infer_compression(filepath_or_buffer, compression)
filepath_or_buffer, _, compression = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = _validate_integer('nrows', kwds.get('nrows', None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
return data
_parser_defaults = {
'delimiter': None,
'doublequote': True,
'escapechar': None,
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'skipinitialspace': False,
'lineterminator': None,
'header': 'infer',
'index_col': None,
'names': None,
'prefix': None,
'skiprows': None,
'na_values': None,
'true_values': None,
'false_values': None,
'converters': None,
'dtype': None,
'skipfooter': 0,
'keep_default_na': True,
'thousands': None,
'comment': None,
'decimal': b'.',
# 'engine': 'c',
'parse_dates': False,
'keep_date_col': False,
'dayfirst': False,
'date_parser': None,
'usecols': None,
'nrows': None,
# 'iterator': False,
'chunksize': None,
'verbose': False,
'encoding': None,
'squeeze': False,
'compression': None,
'mangle_dupe_cols': True,
'tupleize_cols': False,
'infer_datetime_format': False,
'skip_blank_lines': True
}
_c_parser_defaults = {
'delim_whitespace': False,
'as_recarray': False,
'na_filter': True,
'compact_ints': False,
'use_unsigned': False,
'low_memory': True,
'memory_map': False,
'buffer_lines': None,
'error_bad_lines': True,
'warn_bad_lines': True,
'float_precision': None
}
_fwf_defaults = {
'colspecs': 'infer',
'widths': None,
}
_c_unsupported = set(['skipfooter'])
_python_unsupported = set([
'low_memory',
'buffer_lines',
'float_precision',
])
_deprecated_args = set([
'as_recarray',
'buffer_lines',
'compact_ints',
'use_unsigned',
])
def _make_parser_function(name, sep=','):
default_sep = sep
def parser_f(filepath_or_buffer,
sep=sep,
delimiter=None,
# Column and Index Locations and Names
header='infer',
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression='infer',
thousands=None,
decimal=b'.',
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
tupleize_cols=False,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
skip_footer=0, # deprecated
# Internal
doublequote=True,
delim_whitespace=False,
as_recarray=False,
compact_ints=False,
use_unsigned=False,
low_memory=_c_parser_defaults['low_memory'],
buffer_lines=None,
memory_map=False,
float_precision=None):
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
if delim_whitespace and delimiter is not default_sep:
raise ValueError("Specified a delimiter with both sep and"
" delim_whitespace=True; you can only"
" specify one.")
if engine is not None:
engine_specified = True
else:
engine = 'c'
engine_specified = False
if skip_footer != 0:
warnings.warn("The 'skip_footer' argument has "
"been deprecated and will be removed "
"in a future version. Please use the "
"'skipfooter' argument instead.",
FutureWarning, stacklevel=2)
kwds = dict(delimiter=delimiter,
engine=engine,
dialect=dialect,
compression=compression,
engine_specified=engine_specified,
doublequote=doublequote,
escapechar=escapechar,
quotechar=quotechar,
quoting=quoting,
skipinitialspace=skipinitialspace,
lineterminator=lineterminator,
header=header,
index_col=index_col,
names=names,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
thousands=thousands,
comment=comment,
decimal=decimal,
parse_dates=parse_dates,
keep_date_col=keep_date_col,
dayfirst=dayfirst,
date_parser=date_parser,
nrows=nrows,
iterator=iterator,
chunksize=chunksize,
skipfooter=skipfooter or skip_footer,
converters=converters,
dtype=dtype,
usecols=usecols,
verbose=verbose,
encoding=encoding,
squeeze=squeeze,
memory_map=memory_map,
float_precision=float_precision,
na_filter=na_filter,
compact_ints=compact_ints,
use_unsigned=use_unsigned,
delim_whitespace=delim_whitespace,
as_recarray=as_recarray,
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
low_memory=low_memory,
buffer_lines=buffer_lines,
mangle_dupe_cols=mangle_dupe_cols,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
skip_blank_lines=skip_blank_lines)
return _read(filepath_or_buffer, kwds)
parser_f.__name__ = name
return parser_f
read_csv = _make_parser_function('read_csv', sep=',')
read_csv = Appender(_read_csv_doc)(read_csv)
read_table = _make_parser_function('read_table', sep='\t')
read_table = Appender(_read_table_doc)(read_table)
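# Illustrative sketch (not part of pandas): minimal usage of the
# read_csv/read_table functions built above; the in-memory data relies on
# the StringIO imported at the top of this module.
def _demo_read_csv_usage():
    csv_data = "a,b,c\n1,2,3\n4,5,6\n"
    df = read_csv(StringIO(csv_data), usecols=['a', 'c'])
    tbl = read_table(StringIO("a\tb\n1\t2\n"))
    return df, tbl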
@Appender(_read_fwf_doc)
def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
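# Illustrative sketch (not part of pandas): shows that a `widths` list and
# the equivalent explicit `colspecs` give the same result in read_fwf above.
def _demo_read_fwf_usage():
    data = "20170101  3.5\n20170102  4.0\n"
    by_widths = read_fwf(StringIO(data), widths=[8, 5],
                         names=['date', 'value'])
    by_colspecs = read_fwf(StringIO(data), colspecs=[(0, 8), (8, 13)],
                           names=['date', 'value'])
    return by_widths, by_colspecs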
class TextFileReader(BaseIterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = 'python'
engine_specified = False
self._engine_specified = kwds.get('engine_specified', engine_specified)
if kwds.get('dialect') is not None:
dialect = kwds['dialect']
if dialect in csv.list_dialects():
dialect = csv.get_dialect(dialect)
# Any valid dialect should have these attributes.
# If any are missing, we will raise automatically.
for param in ('delimiter', 'doublequote', 'escapechar',
'skipinitialspace', 'quotechar', 'quoting'):
try:
dialect_val = getattr(dialect, param)
except AttributeError:
raise ValueError("Invalid dialect '{dialect}' provided"
.format(dialect=kwds['dialect']))
provided = kwds.get(param, _parser_defaults[param])
# Messages for conflicting values between the dialect instance
# and the actual parameters provided.
conflict_msgs = []
if dialect_val != provided:
conflict_msgs.append((
"Conflicting values for '{param}': '{val}' was "
"provided, but the dialect specifies '{diaval}'. "
"Using the dialect-specified value.".format(
param=param, val=provided, diaval=dialect_val)))
if conflict_msgs:
warnings.warn('\n\n'.join(conflict_msgs), ParserWarning,
stacklevel=2)
kwds[param] = dialect_val
if kwds.get('header', 'infer') == 'infer':
kwds['header'] = 0 if kwds.get('names') is None else None
self.orig_options = kwds
# miscellanea
self.engine = engine
self._engine = None
self._currow = 0
options = self._get_options_with_defaults(engine)
self.chunksize = options.pop('chunksize', None)
self.nrows = options.pop('nrows', None)
self.squeeze = options.pop('squeeze', False)
# might mutate self.engine
self.engine = self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if 'has_index_names' in kwds:
self.options['has_index_names'] = kwds['has_index_names']
self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in compat.iteritems(_parser_defaults):
value = kwds.get(argname, default)
# see gh-12935
if argname == 'mangle_dupe_cols' and not value:
raise ValueError('Setting mangle_dupe_cols=False is '
'not supported yet')
else:
options[argname] = value
for argname, default in compat.iteritems(_c_parser_defaults):
if argname in kwds:
value = kwds[argname]
if engine != 'c' and value != default:
if ('python' in engine and
argname not in _python_unsupported):
pass
else:
raise ValueError(
'The %r option is not supported with the'
' %r engine' % (argname, engine))
else:
value = default
options[argname] = value
if engine == 'python-fwf':
for argname, default in compat.iteritems(_fwf_defaults):
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f):
next_attr = "__next__" if PY3 else "next"
# The C engine doesn't need the file-like to have the "next" or
# "__next__" attribute. However, the Python engine explicitly calls
# "next(...)" when iterating through such an object, meaning it
# needs to have that attribute ("next" for Python 2.x, "__next__"
# for Python 3.x)
if engine != "c" and not hasattr(f, next_attr):
msg = ("The 'python' engine cannot iterate "
"through this file buffer.")
raise ValueError(msg)
return engine
def _clean_options(self, options, engine):
result = options.copy()
engine_specified = self._engine_specified
fallback_reason = None
sep = options['delimiter']
delim_whitespace = options['delim_whitespace']
# C engine not supported yet
if engine == 'c':
if options['skipfooter'] > 0:
fallback_reason = "the 'c' engine does not support"\
" skipfooter"
engine = 'python'
encoding = sys.getfilesystemencoding() or 'utf-8'
if sep is None and not delim_whitespace:
if engine == 'c':
fallback_reason = "the 'c' engine does not support"\
" sep=None with delim_whitespace=False"
engine = 'python'
elif sep is not None and len(sep) > 1:
if engine == 'c' and sep == '\s+':
result['delim_whitespace'] = True
del result['delimiter']
elif engine not in ('python', 'python-fwf'):
# wait until regex engine integrated
fallback_reason = "the 'c' engine does not support"\
" regex separators (separators > 1 char and"\
" different from '\s+' are"\
" interpreted as regex)"
engine = 'python'
elif delim_whitespace:
if 'python' in engine:
result['delimiter'] = '\s+'
elif sep is not None:
encodeable = True
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ('python', 'python-fwf'):
fallback_reason = "the separator encoded in {encoding}" \
" is > 1 char long, and the 'c' engine" \
" does not support such separators".format(
encoding=encoding)
engine = 'python'
quotechar = options['quotechar']
if (quotechar is not None and
isinstance(quotechar, (str, compat.text_type, bytes))):
if (len(quotechar) == 1 and ord(quotechar) > 127 and
engine not in ('python', 'python-fwf')):
fallback_reason = ("ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support "
"such quotechars")
engine = 'python'
if fallback_reason and engine_specified:
raise ValueError(fallback_reason)
if engine == 'c':
for arg in _c_unsupported:
del result[arg]
if 'python' in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
msg = ("Falling back to the 'python' engine because"
" {reason}, but this causes {option!r} to be"
" ignored as it is not supported by the 'python'"
" engine.").format(reason=fallback_reason,
option=arg)
raise ValueError(msg)
del result[arg]
if fallback_reason:
warnings.warn(("Falling back to the 'python' engine because"
" {0}; you can avoid this warning by specifying"
" engine='python'.").format(fallback_reason),
ParserWarning, stacklevel=5)
index_col = options['index_col']
names = options['names']
converters = options['converters']
na_values = options['na_values']
skiprows = options['skiprows']
# really delete this one
keep_default_na = result.pop('keep_default_na')
_validate_header_arg(options['header'])
depr_warning = ''
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
msg = ("The '{arg}' argument has been deprecated "
"and will be removed in a future version."
.format(arg=arg))
if arg == 'as_recarray':
msg += ' Please call pd.read_csv(...).to_records() instead.'
if result.get(arg, parser_default) != parser_default:
depr_warning += msg + '\n\n'
if depr_warning != '':
warnings.warn(depr_warning, FutureWarning, stacklevel=2)
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result['index_col'] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError('Type converters must be a dict or'
' subclass, input was '
'a {0!r}'.format(type(converters).__name__))
else:
converters = {}
# Converting values to NA
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != 'c':
if is_integer(skiprows):
skiprows = lrange(skiprows)
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine='c'):
if engine == 'c':
self._engine = CParserWrapper(self.f, **self.options)
else:
if engine == 'python':
klass = PythonParser
elif engine == 'python-fwf':
klass = FixedWidthFieldParser
else:
raise ValueError('Unknown engine: {engine} (valid options are'
                 ' "c", "python", or "python-fwf")'
                 .format(engine=engine))
self._engine = klass(self.f, **self.options)
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
if nrows is not None:
if self.options.get('skipfooter'):
raise ValueError('skipfooter not supported for iteration')
ret = self._engine.read(nrows)
if self.options.get('as_recarray'):
return ret
# May alter columns / col_dict
index, columns, col_dict = self._create_index(ret)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(compat.next(compat.itervalues(col_dict)))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def _create_index(self, ret):
index, columns, col_dict = ret
return index, columns, col_dict
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
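# Illustrative sketch (not part of pandas): passing `chunksize` to read_csv
# returns the TextFileReader defined above, which can be iterated or
# queried with get_chunk().
def _demo_chunked_read():
    data = "a,b\n" + "\n".join("%d,%d" % (i, 2 * i) for i in range(10))
    reader = read_csv(StringIO(data), chunksize=4)
    return [len(chunk) for chunk in reader]  # [4, 4, 2]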
def _is_index_col(col):
return col is not None and col is not False
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return set([i for i, name in enumerate(names)
if usecols(name)])
return usecols
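# Illustrative sketch (not part of pandas): _evaluate_usecols above maps a
# callable to a set of column positions, while anything else passes through
# untouched.
def _demo_evaluate_usecols():
    names = ['AAA', 'bbb', 'CCC']
    assert _evaluate_usecols(lambda c: c.isupper(), names) == {0, 2}
    assert _evaluate_usecols(['AAA'], names) == ['AAA']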
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : array-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = ("'usecols' must either be all strings, all unicode, "
"all integers or a callable")
if usecols is not None:
if callable(usecols):
return usecols, None
usecols_dtype = lib.infer_dtype(usecols)
if usecols_dtype not in ('empty', 'integer',
'string', 'unicode'):
raise ValueError(msg)
return set(usecols), usecols_dtype
return usecols, None
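# Illustrative sketch (not part of pandas): _validate_usecols_arg above
# passes callables through with no dtype, and turns array-likes into a set
# plus their inferred dtype.
def _demo_validate_usecols_arg():
    func = lambda c: c.startswith('a')
    assert _validate_usecols_arg(func) == (func, None)
    cols, cols_dtype = _validate_usecols_arg([0, 2])
    assert cols == {0, 2}          # cols_dtype is 'integer' here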
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase(object):
def __init__(self, kwds):
self.names = kwds.get('names')
self.orig_names = None
self.prefix = kwds.pop('prefix', None)
self.index_col = kwds.get('index_col', None)
self.index_names = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(
kwds.pop('parse_dates', False))
self.date_parser = kwds.pop('date_parser', None)
self.dayfirst = kwds.pop('dayfirst', False)
self.keep_date_col = kwds.pop('keep_date_col', False)
self.na_values = kwds.get('na_values')
self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.as_recarray = kwds.get('as_recarray', False)
self.tupleize_cols = kwds.get('tupleize_cols', False)
self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)
self.infer_datetime_format = kwds.pop('infer_datetime_format', False)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format
)
# validate header options for mi
self.header = kwds.get('header')
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if kwds.get('as_recarray'):
raise ValueError("cannot specify as_recarray when "
"specifying a multi-index header")
if kwds.get('usecols'):
raise ValueError("cannot specify usecols when "
"specifying a multi-index header")
if kwds.get('names'):
raise ValueError("cannot specify names when "
"specifying a multi-index header")
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple,
np.ndarray))
if not (is_sequence and
all(map(is_integer, self.index_col)) or
is_integer(self.index_col)):
raise ValueError("index_col must only contain row numbers "
"when specifying a multi-index header")
# GH 16338
elif self.header is not None and not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
self._name_processed = False
self._first_chunk = True
# GH 13932
# keep references to file handles opened by the parser itself
self.handles = []
def close(self):
for f in self.handles:
f.close()
@property
def _has_complex_date_col(self):
return (isinstance(self.parse_dates, dict) or
(isinstance(self.parse_dates, list) and
len(self.parse_dates) > 0 and
isinstance(self.parse_dates[0], list)))
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return ((j == self.parse_dates) or
(name is not None and name == self.parse_dates))
else:
return ((j in self.parse_dates) or
(name is not None and name in self.parse_dates))
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple([r[i] for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
def tostr(x):
return str(x) if not isinstance(x, compat.string_types) else x
# if we find 'Unnamed' in every entry of a single level, then our
# header was too long
for n in range(len(columns[0])):
if all(['Unnamed' in tostr(c[n]) for c in columns]):
raise ParserError(
"Passed header=[%s] are too many rows for this "
"multi_index of columns"
% ','.join([str(x) for x in self.header])
)
# clean the column names (if we have an index_col)
if len(ic):
col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
counts = {}
for i, col in enumerate(names):
cur_count = counts.get(col, 0)
if cur_count > 0:
names[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if (not self.tupleize_cols and len(columns) and
not isinstance(columns, MultiIndex) and
all([isinstance(c, tuple) for c in columns])):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _,
self.index_col) = _clean_index_names(list(columns),
self.index_col)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
index = index.set_names(indexnamerow[:coffset])
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, compat.string_types):
return col
raise ValueError('Index %s invalid' % col)
index = None
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in reversed(sorted(to_remove)):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, compat.string_types):
return icol
if col_names is None:
raise ValueError(('Must supply column order to use %s as '
'index') % str(icol))
for i, c in enumerate(col_names):
if i == icol:
return c
index = None
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in reversed(sorted(to_remove)):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True):
arrays = []
for i, arr in enumerate(index):
if (try_parse_dates and self._should_parse_dates(i)):
arr = self._date_conv(arr)
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
index = MultiIndex.from_arrays(arrays, names=self.index_names)
return index
def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None, dtypes=None):
result = {}
for c, values in compat.iteritems(dct):
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(("Both a converter and dtype were specified "
"for column {0} - only the converter will "
"be used").format(c), ParserWarning,
stacklevel=7)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(
values, list(na_values)).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool=False)
else:
# skip inference if specified dtype is object
try_num_bool = not (cast_type and is_string_dtype(cast_type))
# general type inference and conversion
cvals, na_count = self._infer_types(
values, set(col_na_values) | col_na_fvalues,
try_num_bool)
# type specified in the dtype param
if cast_type and not is_dtype_equal(cvals, cast_type):
cvals = self._cast_types(cvals, cast_type, c)
if issubclass(cvals.dtype.type, np.integer) and self.compact_ints:
cvals = lib.downcast_int64(
cvals, parsers.na_values,
self.use_unsigned)
result[c] = cvals
if verbose and na_count:
print('Filled %d NA values in column %s' % (na_count, str(c)))
return result
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns:
--------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
na_count = isnull(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(result, na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = lib.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = lib.maybe_convert_bool(values,
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
if not is_object_dtype(values):
values = astype_nansafe(values, str)
values = Categorical(values)
else:
try:
values = astype_nansafe(values, cast_type, copy=True)
except ValueError:
raise ValueError("Unable to convert column %s to "
"type %s" % (column, cast_type))
return values
def _do_date_conversions(self, names, data):
# returns data, columns
if self.parse_dates is not None:
data, names = _process_date_conversion(
data, self._date_conv, self.parse_dates, self.index_col,
self.index_names, names, keep_date_col=self.keep_date_col)
return names, data
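# Standalone sketch (hypothetical helper, not part of pandas) mirroring
# ParserBase._maybe_dedup_names above: with mangle_dupe_cols enabled,
# repeated names become 'name.1', 'name.2', ...
def _demo_dedup_names(names):
    names = list(names)
    counts = {}
    for i, col in enumerate(names):
        cur_count = counts.get(col, 0)
        if cur_count > 0:
            names[i] = '%s.%d' % (col, cur_count)
        counts[col] = cur_count + 1
    return names
# _demo_dedup_names(['a', 'a', 'b', 'a']) -> ['a', 'a.1', 'b', 'a.2']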
class CParserWrapper(ParserBase):
"""
"""
def __init__(self, src, **kwds):
self.kwds = kwds
kwds = kwds.copy()
ParserBase.__init__(self, kwds)
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
self.handles.append(src)
src = UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
kwds['allow_leading_cols'] = self.index_col is not False
self._reader = parsers.TextReader(src, **kwds)
# XXX
self.usecols, self.usecols_dtype = _validate_usecols_arg(
self._reader.usecols)
passed_names = self.names is None
if self._reader.header is None:
self.names = None
else:
if len(self._reader.header) > 1:
# we have a multi index in the columns
self.names, self.index_names, self.col_names, passed_names = (
self._extract_multi_indexer_columns(
self._reader.header, self.index_names, self.col_names,
passed_names
)
)
else:
self.names = list(self._reader.header[0])
if self.names is None:
if self.prefix:
self.names = ['%s%d' % (self.prefix, i)
for i in range(self._reader.table_width)]
else:
self.names = lrange(self._reader.table_width)
# gh-9755
#
# need to set orig_names here first
# so that proper indexing can be done
# with _set_noconvert_columns
#
# once names has been filtered, we will
# then set orig_names again to names
self.orig_names = self.names[:]
if self.usecols:
usecols = _evaluate_usecols(self.usecols, self.orig_names)
# GH 14671
if (self.usecols_dtype == 'string' and
not set(usecols).issubset(self.orig_names)):
raise ValueError("Usecols do not match names.")
if len(self.names) > len(usecols):
self.names = [n for i, n in enumerate(self.names)
if (i in usecols or n in usecols)]
if len(self.names) < len(usecols):
raise ValueError("Usecols do not match names.")
self._set_noconvert_columns()
self.orig_names = self.names
if not self._has_complex_date_col:
if (self._reader.leading_cols == 0 and
_is_index_col(self.index_col)):
self._name_processed = True
(index_names, self.names,
self.index_col) = _clean_index_names(self.names,
self.index_col)
if self.index_names is None:
self.index_names = index_names
if self._reader.header is None and not passed_names:
self.index_names = [None] * len(self.index_names)
self._implicit_index = self._reader.leading_cols > 0
def close(self):
for f in self.handles:
f.close()
# close additional handles opened by C parser (for compression)
try:
self._reader.close()
except:
pass
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == 'integer':
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
def set_error_bad_lines(self, status):
self._reader.set_error_bad_lines(int(status))
def read(self, nrows=None):
try:
data = self._reader.read(nrows)
except StopIteration:
if self._first_chunk:
self._first_chunk = False
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names,
dtype=self.kwds.get('dtype'))
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
if self.usecols is not None:
columns = self._filter_usecols(columns)
col_dict = dict(filter(lambda item: item[0] in columns,
col_dict.items()))
return index, columns, col_dict
else:
raise
# Done with first read, next time raise StopIteration
self._first_chunk = False
if self.as_recarray:
# what to do if there are leading columns?
return data
names = self.names
if self._reader.leading_cols:
if self._has_complex_date_col:
raise NotImplementedError('file structure not yet supported')
# implicit index, no index names
arrays = []
for i in range(self._reader.leading_cols):
if self.index_col is None:
values = data.pop(i)
else:
values = data.pop(self.index_col[i])
values = self._maybe_parse_dates(values, i,
try_parse_dates=True)
arrays.append(values)
index = MultiIndex.from_arrays(arrays)
if self.usecols is not None:
names = self._filter_usecols(names)
names = self._maybe_dedup_names(names)
# rename dict keys
data = sorted(data.items())
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
else:
# rename dict keys
data = sorted(data.items())
# ugh, mutation
names = list(self.orig_names)
names = self._maybe_dedup_names(names)
if self.usecols is not None:
names = self._filter_usecols(names)
# columns as list
alldata = [x[1] for x in data]
data = dict((k, v) for k, (i, v) in zip(names, data))
names, data = self._do_date_conversions(names, data)
index, names = self._make_index(data, alldata, names)
# maybe create a mi on the columns
names = self._maybe_make_multi_index_columns(names, self.col_names)
return index, names, data
def _filter_usecols(self, names):
# hackish
usecols = _evaluate_usecols(self.usecols, names)
if usecols is not None and len(names) != len(usecols):
names = [name for i, name in enumerate(names)
if i in usecols or name in usecols]
return names
def _get_index_names(self):
names = list(self._reader.header[0])
idx_names = None
if self._reader.leading_cols == 0 and self.index_col is not None:
(idx_names, names,
self.index_col) = _clean_index_names(names, self.index_col)
return names, idx_names
def _maybe_parse_dates(self, values, index, try_parse_dates=True):
if try_parse_dates and self._should_parse_dates(index):
values = self._date_conv(values)
return values
def TextParser(*args, **kwds):
"""
Converts lists of lists/tuples into DataFrames with proper type inference
and optional (e.g. string to datetime) conversion. Also enables iterating
lazily over chunks of large files
Parameters
----------
data : file-like object or list
delimiter : separator character to use
dialect : str or csv.Dialect instance, default None
Ignored if delimiter is longer than 1 character
names : sequence, default
header : int, default 0
Row to use to parse column labels. Defaults to the first row. Prior
rows will be discarded
index_col : int or list, default None
Column or columns to use as the (possibly hierarchical) index
has_index_names: boolean, default False
True if the cols defined in index_col have an index name and are
not in the header
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN.
keep_default_na : bool, default True
thousands : str, default None
Thousands separator
comment : str, default None
Comment out remainder of line
parse_dates : boolean, default False
keep_date_col : boolean, default False
date_parser : function, default None
skiprows : list of integers
Row numbers to skip
skipfooter : int
Number of line at bottom of file to skip
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
encoding : string, default None
Encoding to use for UTF when reading/writing (ex. 'utf-8')
squeeze : boolean, default False
returns Series if only one column
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
float_precision : string, default None
Specifies which converter the C engine should use for floating-point
values. The options are None for the ordinary converter,
'high' for the high-precision converter, and 'round_trip' for the
round-trip converter.
"""
kwds['engine'] = 'python'
return TextFileReader(*args, **kwds)
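# --- Illustrative usage sketch (not part of the pandas source): TextParser on an
# in-memory list of rows.  With header=0 the first row supplies the column labels
# and read() materializes the remaining rows as a DataFrame.  The helper name is
# ours, not a pandas API.
def _textparser_usage_sketch():
    data = [['a', 'b', 'c'],
            [1, 2, 3],
            [4, 5, 6]]
    parser = TextParser(data, header=0)
    return parser.read()  # e.g. a 2x3 DataFrame with columns ['a', 'b', 'c']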
def count_empty_vals(vals):
return sum([1 for v in vals if v == '' or v is None])
class PythonParser(ParserBase):
def __init__(self, f, **kwds):
"""
Workhorse function for processing nested list into DataFrame
Should be replaced by np.genfromtxt eventually?
"""
ParserBase.__init__(self, kwds)
self.data = None
self.buf = []
self.pos = 0
self.line_pos = 0
self.encoding = kwds['encoding']
self.compression = kwds['compression']
self.memory_map = kwds['memory_map']
self.skiprows = kwds['skiprows']
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])
self.delimiter = kwds['delimiter']
self.quotechar = kwds['quotechar']
if isinstance(self.quotechar, compat.text_type):
self.quotechar = str(self.quotechar)
self.escapechar = kwds['escapechar']
self.doublequote = kwds['doublequote']
self.skipinitialspace = kwds['skipinitialspace']
self.lineterminator = kwds['lineterminator']
self.quoting = kwds['quoting']
self.usecols, _ = _validate_usecols_arg(kwds['usecols'])
self.skip_blank_lines = kwds['skip_blank_lines']
self.warn_bad_lines = kwds['warn_bad_lines']
self.error_bad_lines = kwds['error_bad_lines']
self.names_passed = kwds['names'] or None
self.na_filter = kwds['na_filter']
self.has_index_names = False
if 'has_index_names' in kwds:
self.has_index_names = kwds['has_index_names']
self.verbose = kwds['verbose']
self.converters = kwds['converters']
self.dtype = kwds['dtype']
self.compact_ints = kwds['compact_ints']
self.use_unsigned = kwds['use_unsigned']
self.thousands = kwds['thousands']
self.decimal = kwds['decimal']
self.comment = kwds['comment']
self._comment_lines = []
mode = 'r' if PY3 else 'rb'
f, handles = _get_handle(f, mode, encoding=self.encoding,
compression=self.compression,
memory_map=self.memory_map)
self.handles.extend(handles)
# Set self.data to something that can read lines.
if hasattr(f, 'readline'):
self._make_reader(f)
else:
self.data = f
# Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
self._col_indices = None
self.columns, self.num_original_columns = self._infer_columns()
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
self.columns, self.index_names, self.col_names, _ = (
self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = (
self._get_index_name(self.columns))
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError('Only length-1 decimal markers supported')
if self.thousands is None:
self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)
else:
self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,
self.decimal))
def _set_no_thousands_columns(self):
# Create a set of column ids that are not to be stripped of thousands
# operators.
noconvert_columns = set()
def _set(x):
if is_integer(x):
noconvert_columns.add(x)
else:
noconvert_columns.add(self.columns.index(x))
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
return noconvert_columns
def _make_reader(self, f):
sep = self.delimiter
if sep is None or len(sep) == 1:
if self.lineterminator:
raise ValueError('Custom line terminators not supported in '
'python parser (yet)')
class MyDialect(csv.Dialect):
delimiter = self.delimiter
quotechar = self.quotechar
escapechar = self.escapechar
doublequote = self.doublequote
skipinitialspace = self.skipinitialspace
quoting = self.quoting
lineterminator = '\n'
dia = MyDialect
sniff_sep = True
if sep is not None:
sniff_sep = False
dia.delimiter = sep
# attempt to sniff the delimiter
if sniff_sep:
line = f.readline()
while self.skipfunc(self.pos):
self.pos += 1
line = f.readline()
line = self._check_comments([line])[0]
self.pos += 1
self.line_pos += 1
sniffed = csv.Sniffer().sniff(line)
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
UnicodeReader(StringIO(line),
dialect=dia,
encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
reader = UnicodeReader(f, dialect=dia,
encoding=self.encoding,
strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
else:
def _read():
line = f.readline()
if compat.PY2 and self.encoding:
line = line.decode(self.encoding)
pat = re.compile(sep)
yield pat.split(line.strip())
for line in f:
yield pat.split(line.strip())
reader = _read()
self.data = reader
def read(self, rows=None):
try:
content = self._get_lines(rows)
except StopIteration:
if self._first_chunk:
content = []
else:
raise
# done with first read, next time raise StopIteration
self._first_chunk = False
columns = list(self.orig_names)
if not len(content): # pragma: no cover
# DataFrame with the right metadata, even though it's length 0
names = self._maybe_dedup_names(self.orig_names)
index, columns, col_dict = _get_empty_meta(
names, self.index_col, self.index_names, self.dtype)
columns = self._maybe_make_multi_index_columns(
columns, self.col_names)
return index, columns, col_dict
# handle new style for names in index
count_empty_content_vals = count_empty_vals(content[0])
indexnamerow = None
if self.has_index_names and count_empty_content_vals == len(columns):
indexnamerow = content[0]
content = content[1:]
alldata = self._rows_to_cols(content)
data = self._exclude_implicit_index(alldata)
columns = self._maybe_dedup_names(self.columns)
columns, data = self._do_date_conversions(columns, data)
data = self._convert_data(data)
if self.as_recarray:
return self._to_recarray(data, columns)
index, columns = self._make_index(data, alldata, columns, indexnamerow)
return index, columns, data
def _exclude_implicit_index(self, alldata):
names = self._maybe_dedup_names(self.orig_names)
if self._implicit_index:
excl_indices = self.index_col
data = {}
offset = 0
for i, col in enumerate(names):
while i + offset in excl_indices:
offset += 1
data[col] = alldata[i + offset]
else:
data = dict((k, v) for k, v in zip(names, alldata))
return data
# legacy
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
return self.read(rows=size)
def _convert_data(self, data):
# apply converters
def _clean_mapping(mapping):
"converts col numbers to names"
clean = {}
for col, v in compat.iteritems(mapping):
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean[col] = v
return clean
clean_conv = _clean_mapping(self.converters)
if not isinstance(self.dtype, dict):
# handles single dtype applied to all columns
clean_dtypes = self.dtype
else:
clean_dtypes = _clean_mapping(self.dtype)
# Apply NA values.
clean_na_values = {}
clean_na_fvalues = {}
if isinstance(self.na_values, dict):
for col in self.na_values:
na_value = self.na_values[col]
na_fvalue = self.na_fvalues[col]
if isinstance(col, int) and col not in self.orig_names:
col = self.orig_names[col]
clean_na_values[col] = na_value
clean_na_fvalues[col] = na_fvalue
else:
clean_na_values = self.na_values
clean_na_fvalues = self.na_fvalues
return self._convert_to_ndarrays(data, clean_na_values,
clean_na_fvalues, self.verbose,
clean_conv, clean_dtypes)
def _to_recarray(self, data, columns):
dtypes = []
o = compat.OrderedDict()
# use the columns to "order" the keys
# in the unordered 'data' dictionary
for col in columns:
dtypes.append((str(col), data[col].dtype))
o[col] = data[col]
tuples = lzip(*o.values())
return np.array(tuples, dtypes)
def _infer_columns(self):
names = self.names
num_original_columns = 0
clear_buffer = True
if self.header is not None:
header = self.header
# we have a mi columns, so read an extra line
if isinstance(header, (list, tuple, np.ndarray)):
have_mi_columns = True
header = list(header) + [header[-1] + 1]
else:
have_mi_columns = False
header = [header]
columns = []
for level, hr in enumerate(header):
try:
line = self._buffered_line()
while self.line_pos <= hr:
line = self._next_line()
except StopIteration:
if self.line_pos < hr:
raise ValueError(
'Passed header=%s but only %d lines in file'
% (hr, self.line_pos + 1))
# We have an empty file, so check
# if columns are provided. That will
# serve as the 'line' for parsing
if have_mi_columns and hr > 0:
if clear_buffer:
self._clear_buffer()
columns.append([None] * len(columns[-1]))
return columns, num_original_columns
if not self.names:
raise EmptyDataError(
"No columns to parse from file")
line = self.names[:]
unnamed_count = 0
this_columns = []
for i, c in enumerate(line):
if c == '':
if have_mi_columns:
this_columns.append('Unnamed: %d_level_%d'
% (i, level))
else:
this_columns.append('Unnamed: %d' % i)
unnamed_count += 1
else:
this_columns.append(c)
if not have_mi_columns and self.mangle_dupe_cols:
counts = {}
for i, col in enumerate(this_columns):
cur_count = counts.get(col, 0)
if cur_count > 0:
this_columns[i] = '%s.%d' % (col, cur_count)
counts[col] = cur_count + 1
elif have_mi_columns:
                    # if we have grabbed an extra line, but it's not in our
                    # format, save it in the buffer and create a blank extra
                    # line for the rest of the parsing code
if hr == header[-1]:
lc = len(this_columns)
ic = (len(self.index_col)
if self.index_col is not None else 0)
if lc != unnamed_count and lc - ic > unnamed_count:
clear_buffer = False
this_columns = [None] * lc
self.buf = [self.buf[-1]]
columns.append(this_columns)
if len(columns) == 1:
num_original_columns = len(this_columns)
if clear_buffer:
self._clear_buffer()
if names is not None:
if ((self.usecols is not None and
len(names) != len(self.usecols)) or
(self.usecols is None and
len(names) != len(columns[0]))):
raise ValueError('Number of passed names did not match '
'number of header fields in the file')
if len(columns) > 1:
raise TypeError('Cannot pass names with multi-index '
'columns')
if self.usecols is not None:
# Set _use_cols. We don't store columns because they are
# overwritten.
self._handle_usecols(columns, names)
else:
self._col_indices = None
num_original_columns = len(names)
columns = [names]
else:
columns = self._handle_usecols(columns, columns[0])
else:
try:
line = self._buffered_line()
except StopIteration:
if not names:
raise EmptyDataError(
"No columns to parse from file")
line = names[:]
ncols = len(line)
num_original_columns = ncols
if not names:
if self.prefix:
columns = [['%s%d' % (self.prefix, i)
for i in range(ncols)]]
else:
columns = [lrange(ncols)]
columns = self._handle_usecols(columns, columns[0])
else:
if self.usecols is None or len(names) >= num_original_columns:
columns = self._handle_usecols([names], names)
num_original_columns = len(names)
else:
if (not callable(self.usecols) and
len(names) != len(self.usecols)):
raise ValueError(
'Number of passed names did not match number of '
'header fields in the file'
)
# Ignore output but set used columns.
self._handle_usecols([names], names)
columns = [names]
num_original_columns = ncols
return columns, num_original_columns
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any([isinstance(u, string_types) for u in self.usecols]):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for col in self.usecols:
if isinstance(col, string_types):
col_indices.append(usecols_key.index(col))
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
def _buffered_line(self):
"""
Return a line from buffer, filling buffer if required.
"""
if len(self.buf) > 0:
return self.buf[0]
else:
return self._next_line()
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], compat.string_types):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
# This is to avoid warnings we get in Python 2.x if
# we find ourselves comparing with non-Unicode
if compat.PY2 and not isinstance(first_elt, unicode): # noqa
try:
first_elt = u(first_elt)
except UnicodeDecodeError:
return first_row
if first_elt != _BOM:
return first_row
first_row = first_row[0]
if len(first_row) > 1 and first_row[1] == self.quotechar:
start = 2
quote = first_row[1]
end = first_row[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row) > end + 1:
new_row += first_row[end + 1:]
return [new_row]
elif len(first_row) > 1:
return [first_row[1:]]
else:
# First row is just the BOM, so we
# return an empty string.
return [""]
def _is_line_empty(self, line):
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
def _next_line(self):
if isinstance(self.data, list):
while self.skipfunc(self.pos):
self.pos += 1
while True:
try:
line = self._check_comments([self.data[self.pos]])[0]
self.pos += 1
# either uncommented or blank to begin with
if (not self.skip_blank_lines and
(self._is_line_empty(
self.data[self.pos - 1]) or line)):
break
elif self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
except IndexError:
raise StopIteration
else:
while self.skipfunc(self.pos):
self.pos += 1
next(self.data)
while True:
orig_line = self._next_iter_line(row_num=self.pos + 1)
self.pos += 1
if orig_line is not None:
line = self._check_comments([orig_line])[0]
if self.skip_blank_lines:
ret = self._remove_empty_lines([line])
if ret:
line = ret[0]
break
elif self._is_line_empty(orig_line) or line:
break
# This was the first line of the file,
# which could contain the BOM at the
# beginning of it.
if self.pos == 1:
line = self._check_for_bom(line)
self.line_pos += 1
self.buf.append(line)
return line
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n')
def _next_iter_line(self, row_num):
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num : The row number of the line being parsed.
"""
try:
return next(self.data)
except csv.Error as e:
if self.warn_bad_lines or self.error_bad_lines:
msg = str(e)
if 'NULL byte' in msg:
msg = ('NULL byte detected. This byte '
'cannot be processed in Python\'s '
'native csv library at the moment, '
'so please pass in engine=\'c\' instead')
elif 'newline inside string' in msg:
msg = ('EOF inside string starting with '
'line ' + str(row_num))
if self.skipfooter > 0:
reason = ('Error could possibly be due to '
'parsing errors in the skipped footer rows '
'(the skipfooter keyword is only applied '
'after Python\'s csv library has parsed '
'all rows).')
msg += '. ' + reason
self._alert_malformed(msg, row_num)
return None
def _check_comments(self, lines):
if self.comment is None:
return lines
ret = []
for l in lines:
rl = []
for x in l:
if (not isinstance(x, compat.string_types) or
self.comment not in x):
rl.append(x)
else:
x = x[:x.find(self.comment)]
if len(x) > 0:
rl.append(x)
break
ret.append(rl)
return ret
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], compat.string_types) or
l[0].strip())):
ret.append(l)
return ret
def _check_thousands(self, lines):
if self.thousands is None:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.thousands,
replace='')
def _search_replace_num_columns(self, lines, search, replace):
ret = []
for l in lines:
rl = []
for i, x in enumerate(l):
if (not isinstance(x, compat.string_types) or
search not in x or
(self._no_thousands_columns and
i in self._no_thousands_columns) or
self.nonnum.search(x.strip())):
rl.append(x)
else:
rl.append(x.replace(search, replace))
ret.append(rl)
return ret
def _check_decimal(self, lines):
if self.decimal == _parser_defaults['decimal']:
return lines
return self._search_replace_num_columns(lines=lines,
search=self.decimal,
replace='.')
def _clear_buffer(self):
self.buf = []
_implicit_index = False
def _get_index_name(self, columns):
"""
Try several cases to get lines:
0) There are headers on row 0 and row 1 and their
total summed lengths equals the length of the next line.
Treat row 0 as columns and row 1 as indices
1) Look for implicit index: there are more columns
on row 1 than row 0. If this is true, assume that row
1 lists index columns and row 0 lists normal columns.
2) Get index from the columns if it was listed.
"""
orig_names = list(columns)
columns = list(columns)
try:
line = self._next_line()
except StopIteration:
line = None
try:
next_line = self._next_line()
except StopIteration:
next_line = None
# implicitly index_col=0 b/c 1 fewer column names
implicit_first_cols = 0
if line is not None:
# leave it 0, #2442
# Case 1
if self.index_col is not False:
implicit_first_cols = len(line) - self.num_original_columns
# Case 0
if next_line is not None:
if len(next_line) == len(line) + self.num_original_columns:
# column and index names on diff rows
self.index_col = lrange(len(line))
self.buf = self.buf[1:]
for c in reversed(line):
columns.insert(0, c)
# Update list of original names to include all indices.
orig_names = list(columns)
self.num_original_columns = len(columns)
return line, orig_names, columns
if implicit_first_cols > 0:
# Case 1
self._implicit_index = True
if self.index_col is None:
self.index_col = lrange(implicit_first_cols)
index_name = None
else:
# Case 2
(index_name, columns_,
self.index_col) = _clean_index_names(columns, self.index_col)
return index_name, orig_names, columns
def _rows_to_cols(self, content):
col_len = self.num_original_columns
if self._implicit_index:
col_len += len(self.index_col)
max_len = max([len(row) for row in content])
# Check that there are no rows with too many
# elements in their row (rows with too few
# elements are padded with NaN).
if (max_len > col_len and
self.index_col is not False and
self.usecols is None):
footers = self.skipfooter if self.skipfooter else 0
bad_lines = []
iter_content = enumerate(content)
content_len = len(content)
content = []
for (i, l) in iter_content:
actual_len = len(l)
if actual_len > col_len:
if self.error_bad_lines or self.warn_bad_lines:
row_num = self.pos - (content_len - i + footers)
bad_lines.append((row_num, actual_len))
if self.error_bad_lines:
break
else:
content.append(l)
for row_num, actual_len in bad_lines:
msg = ('Expected %d fields in line %d, saw %d' %
(col_len, row_num + 1, actual_len))
if len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE:
# see gh-13374
reason = ('Error could possibly be due to quotes being '
'ignored when a multi-char delimiter is used.')
msg += '. ' + reason
self._alert_malformed(msg, row_num + 1)
# see gh-13320
zipped_content = list(lib.to_object_array(
content, min_width=col_len).T)
if self.usecols:
if self._implicit_index:
zipped_content = [
a for i, a in enumerate(zipped_content)
if (i < len(self.index_col) or
i - len(self.index_col) in self._col_indices)]
else:
zipped_content = [a for i, a in enumerate(zipped_content)
if i in self._col_indices]
return zipped_content
def _get_lines(self, rows=None):
lines = self.buf
new_rows = None
# already fetched some number
if rows is not None:
# we already have the lines in the buffer
if len(self.buf) >= rows:
new_rows, self.buf = self.buf[:rows], self.buf[rows:]
# need some lines
else:
rows -= len(self.buf)
if new_rows is None:
if isinstance(self.data, list):
if self.pos > len(self.data):
raise StopIteration
if rows is None:
new_rows = self.data[self.pos:]
new_pos = len(self.data)
else:
new_rows = self.data[self.pos:self.pos + rows]
new_pos = self.pos + rows
# Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
self.pos = new_pos
else:
new_rows = []
try:
if rows is not None:
for _ in range(rows):
new_rows.append(next(self.data))
lines.extend(new_rows)
else:
rows = 0
while True:
new_row = self._next_iter_line(
row_num=self.pos + rows + 1)
rows += 1
if new_row is not None:
new_rows.append(new_row)
except StopIteration:
if self.skiprows:
new_rows = [row for i, row in enumerate(new_rows)
if not self.skipfunc(i + self.pos)]
lines.extend(new_rows)
if len(lines) == 0:
raise
self.pos += len(new_rows)
self.buf = []
else:
lines = new_rows
if self.skipfooter:
lines = lines[:-self.skipfooter]
lines = self._check_comments(lines)
if self.skip_blank_lines:
lines = self._remove_empty_lines(lines)
lines = self._check_thousands(lines)
return self._check_decimal(lines)
def _make_date_converter(date_parser=None, dayfirst=False,
infer_datetime_format=False):
def converter(*date_cols):
if date_parser is None:
strs = _concat_date_cols(date_cols)
try:
return tools.to_datetime(
_ensure_object(strs),
utc=None,
box=False,
dayfirst=dayfirst,
errors='ignore',
infer_datetime_format=infer_datetime_format
)
except:
return tools.to_datetime(
lib.try_parse_dates(strs, dayfirst=dayfirst))
else:
try:
result = tools.to_datetime(
date_parser(*date_cols), errors='ignore')
if isinstance(result, datetime.datetime):
raise Exception('scalar parser')
return result
except Exception:
try:
return tools.to_datetime(
lib.try_parse_dates(_concat_date_cols(date_cols),
parser=date_parser,
dayfirst=dayfirst),
errors='ignore')
except Exception:
return generic_parser(date_parser, *date_cols)
return converter
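# --- Illustrative sketch (not part of the pandas source): the converter returned
# above joins its argument columns with spaces and feeds the result to to_datetime.
# The helper name is ours, not a pandas API.
def _date_converter_sketch():
    conv = _make_date_converter(dayfirst=False)
    dates = np.array(['2012-09-08', '2012-10-08'], dtype=object)
    times = np.array(['17:06:11', '18:30:00'], dtype=object)
    return conv(dates, times)  # e.g. an array of datetime64 values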
def _process_date_conversion(data_dict, converter, parse_spec,
index_col, index_names, columns,
keep_date_col=False):
def _isindex(colspec):
return ((isinstance(index_col, list) and
colspec in index_col) or
(isinstance(index_names, list) and
colspec in index_names))
new_cols = []
new_data = {}
orig_names = columns
columns = list(columns)
date_cols = set()
if parse_spec is None or isinstance(parse_spec, bool):
return data_dict, columns
if isinstance(parse_spec, list):
# list of column lists
for colspec in parse_spec:
if is_scalar(colspec):
if isinstance(colspec, int) and colspec not in data_dict:
colspec = orig_names[colspec]
if _isindex(colspec):
continue
data_dict[colspec] = converter(data_dict[colspec])
else:
new_name, col, old_names = _try_convert_dates(
converter, colspec, data_dict, orig_names)
if new_name in data_dict:
raise ValueError('New date column already in dict %s' %
new_name)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
elif isinstance(parse_spec, dict):
# dict of new name to column list
for new_name, colspec in compat.iteritems(parse_spec):
if new_name in data_dict:
raise ValueError('Date column %s already in dict' %
new_name)
_, col, old_names = _try_convert_dates(converter, colspec,
data_dict, orig_names)
new_data[new_name] = col
new_cols.append(new_name)
date_cols.update(old_names)
data_dict.update(new_data)
new_cols.extend(columns)
if not keep_date_col:
for c in list(date_cols):
data_dict.pop(c)
new_cols.remove(c)
return data_dict, new_cols
def _try_convert_dates(parser, colspec, data_dict, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int) and c not in columns:
colnames.append(columns[c])
else:
colnames.append(c)
new_name = '_'.join([str(x) for x in colnames])
to_parse = [data_dict[c] for c in colnames if c in data_dict]
new_col = parser(*to_parse)
return new_name, new_col, colnames
def _clean_na_values(na_values, keep_default_na=True):
if na_values is None:
if keep_default_na:
na_values = _NA_VALUES
else:
na_values = set()
na_fvalues = set()
elif isinstance(na_values, dict):
na_values = na_values.copy() # Prevent aliasing.
if keep_default_na:
for k, v in compat.iteritems(na_values):
if not is_list_like(v):
v = [v]
v = set(v) | _NA_VALUES
na_values[k] = v
na_fvalues = dict([
(k, _floatify_na_values(v)) for k, v in na_values.items() # noqa
])
else:
if not is_list_like(na_values):
na_values = [na_values]
na_values = _stringify_na_values(na_values)
if keep_default_na:
na_values = na_values | _NA_VALUES
na_fvalues = _floatify_na_values(na_values)
return na_values, na_fvalues
def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
return None, columns, index_col
columns = list(columns)
cp_cols = list(columns)
index_names = []
# don't mutate
index_col = list(index_col)
for i, c in enumerate(index_col):
if isinstance(c, compat.string_types):
index_names.append(c)
for j, name in enumerate(cp_cols):
if name == c:
index_col[i] = j
columns.remove(name)
break
else:
name = cp_cols[c]
columns.remove(name)
index_names.append(name)
# hack
if isinstance(index_names[0], compat.string_types)\
and 'Unnamed' in index_names[0]:
index_names[0] = None
return index_names, columns, index_col
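# --- Illustrative sketch (not part of the pandas source): requesting column 'b'
# as the index by name resolves it to its positional slot and drops it from the
# remaining columns.  The helper name is ours, not a pandas API.
def _clean_index_names_sketch():
    index_names, columns, index_col = _clean_index_names(['a', 'b', 'c'], ['b'])
    return index_names, columns, index_col  # (['b'], ['a', 'c'], [1])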
def _get_empty_meta(columns, index_col, index_names, dtype=None):
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be np.object.
default_dtype = dtype or np.object
dtype = defaultdict(lambda: default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: np.object)
# Convert column indexes to column names.
for k, v in compat.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_col is None or index_col is False:
index = Index([])
else:
index = [Series([], dtype=dtype[index_name])
for index_name in index_names]
index = MultiIndex.from_arrays(index, names=index_names)
index_col.sort()
for i, n in enumerate(index_col):
columns.pop(n - i)
col_dict = dict((col_name,
Series([], dtype=dtype[col_name]))
for col_name in columns)
return index, columns, col_dict
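# --- Illustrative sketch (not part of the pandas source): the empty metadata built
# for a header-only file whose first column is used as the index.  The helper name
# is ours, not a pandas API.
def _empty_meta_sketch():
    index, columns, col_dict = _get_empty_meta(
        ['a', 'b', 'c'], index_col=[0], index_names=['a'],
        dtype={'b': np.float64})
    # index: an empty index named 'a'; columns: ['b', 'c'];
    # col_dict: an empty float64 Series for 'b' and an empty object Series for 'c'
    return index, columns, col_dict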
def _floatify_na_values(na_values):
# create float versions of the na_values
result = set()
for v in na_values:
try:
v = float(v)
if not np.isnan(v):
result.add(v)
except:
pass
return result
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
# we are like 999 here
if v == int(v):
v = int(v)
result.append("%s.0" % v)
result.append(str(v))
result.append(v)
except:
pass
try:
result.append(int(x))
except:
pass
return set(result)
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
return _NA_VALUES, set()
else:
return na_values, na_fvalues
def _get_col_names(colspec, columns):
colset = set(columns)
colnames = []
for c in colspec:
if c in colset:
colnames.append(c)
elif isinstance(c, int):
colnames.append(columns[c])
return colnames
def _concat_date_cols(date_cols):
if len(date_cols) == 1:
if compat.PY3:
return np.array([compat.text_type(x) for x in date_cols[0]],
dtype=object)
else:
return np.array([
str(x) if not isinstance(x, compat.string_types) else x
for x in date_cols[0]
], dtype=object)
rs = np.array([' '.join([compat.text_type(y) for y in x])
for x in zip(*date_cols)], dtype=object)
return rs
class FixedWidthReader(BaseIterator):
"""
A reader of fixed-width lines.
"""
def __init__(self, f, colspecs, delimiter, comment, skiprows=None):
self.f = f
self.buffer = None
self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t '
self.comment = comment
if colspecs == 'infer':
self.colspecs = self.detect_colspecs(skiprows=skiprows)
else:
self.colspecs = colspecs
if not isinstance(self.colspecs, (tuple, list)):
raise TypeError("column specifications must be a list or tuple, "
"input was a %r" % type(colspecs).__name__)
for colspec in self.colspecs:
if not (isinstance(colspec, (tuple, list)) and
len(colspec) == 2 and
isinstance(colspec[0], (int, np.integer, type(None))) and
isinstance(colspec[1], (int, np.integer, type(None)))):
raise TypeError('Each column specification must be '
'2 element tuple or list of integers')
def get_rows(self, n, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= n lines)
from the rows returned to detect_colspecs because
it's simpler to leave the other locations with
skiprows logic alone than to modify them to deal
with the fact we skipped some rows here as well.
Parameters
----------
n : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= n:
break
self.buffer = iter(buffer_rows)
return detect_rows
def detect_colspecs(self, n=100, skiprows=None):
# Regex escape the delimiters
delimiters = ''.join([r'\%s' % x for x in self.delimiter])
pattern = re.compile('([^%s]+)' % delimiters)
rows = self.get_rows(n, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
max_len = max(map(len, rows))
mask = np.zeros(max_len + 1, dtype=int)
if self.comment is not None:
rows = [row.partition(self.comment)[0] for row in rows]
for row in rows:
for m in pattern.finditer(row):
mask[m.start():m.end()] = 1
shifted = np.roll(mask, 1)
shifted[0] = 0
edges = np.where((mask ^ shifted) == 1)[0]
edge_pairs = list(zip(edges[::2], edges[1::2]))
return edge_pairs
def __next__(self):
if self.buffer is not None:
try:
line = next(self.buffer)
except StopIteration:
self.buffer = None
line = next(self.f)
else:
line = next(self.f)
# Note: 'colspecs' is a sequence of half-open intervals.
return [line[fromm:to].strip(self.delimiter)
for (fromm, to) in self.colspecs]
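# --- Illustrative sketch (not part of the pandas source): FixedWidthReader with
# inferred column specs on an in-memory buffer.  Column boundaries are detected
# from the non-whitespace runs in the first rows; the helper name is ours.
def _fixed_width_reader_sketch():
    buf = StringIO(u"id    name\n1     ann \n22    bob \n")
    reader = FixedWidthReader(buf, colspecs='infer', delimiter=None, comment=None)
    return list(reader)  # e.g. [['id', 'name'], ['1', 'ann'], ['22', 'bob']]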
class FixedWidthFieldParser(PythonParser):
"""
    Specialization that converts fixed-width fields into DataFrames.
See PythonParser for details.
"""
def __init__(self, f, **kwds):
# Support iterators, convert to a list.
self.colspecs = kwds.pop('colspecs')
PythonParser.__init__(self, f, **kwds)
def _make_reader(self, f):
self.data = FixedWidthReader(f, self.colspecs, self.delimiter,
self.comment, self.skiprows)
|
bsd-3-clause
|
CGATOxford/proj029
|
Proj029Pipelines/pipeline_cogspecificfastq.py
|
1
|
4054
|
"""
=======================================
defining a set of housekeeping genes
=======================================
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import sys
import os
import re
import shutil
import itertools
import math
import glob
import time
import gzip
import collections
import random
import numpy as np
import sqlite3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.vectors as rovectors
from rpy2.rinterface import RRuntimeError
import pandas
import PipelineProj029
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["pipeline.ini"])
PARAMS = P.PARAMS
###################################################################
# connecting to database
###################################################################
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database"])
return dbh
###################################################
###################################################
###################################################
# global variables
COG = PARAMS.get("cog")
###################################################
###################################################
###################################################
@transform(PARAMS.get("igc_file"),
regex("(\S+)/(\S+)"),
r"gene_names.tsv.gz")
def getGenesAssociatedWithCOG(infile, outfile):
'''
get all genes from the IGC that are associated with
a COG
'''
cog = PARAMS.get("cog")
gene2cog = PARAMS.get("gene2cog")
statement = '''python %(scriptsdir)s/cog2fasta.py
-m %(gene2cog)s
-c %(cog)s
-f %(infile)s
--output-list
--log=%(outfile)s.log'''
P.run()
#########################################
#########################################
#########################################
@follows(mkdir("fastq.dir"), getGenesAssociatedWithCOG)
@transform("*.fastq.gz",
regex("(\S+).fastq.gz"),
r"fastq.dir/%s_\1.list.gz" % COG)
def buildReadList(infile, outfile):
'''
build list of fastq reads mapping to COG
'''
afile = P.snip(infile, ".fastq.gz") + ".diamond.genes.tsv.gz"
alignment = os.path.join(PARAMS.get("rna_communities_dir"), afile)
statement = '''python %(scriptsdir)s/genes2reads.py
-g gene_names.tsv.gz
-a %(alignment)s
--log=%(outfile)s.log
| gzip
> %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
@transform("*.fastq.gz",
regex("(\S+).fastq.gz"),
add_inputs(buildReadList),
r"fastq.dir/%s_\1.fastq.gz" % COG)
def buildFastq(infiles, outfile):
'''
filter fastq files based on reads
mapping to COG
'''
fastq = infiles[0]
reads = [x for x in infiles[1:] if P.snip(x, ".list.gz") in outfile][0]
statement = '''zcat %(fastq)s
| python %(cgat_scriptsdir)s/fastq2fastq.py
--apply=%(reads)s
--log=%(outfile)s.log
| gzip > %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
bsd-3-clause
|
kkozarev/mwacme
|
src/fit_powerlaw_spectra_integrated.py
|
2
|
4406
|
import numpy as np
import os,sys
from scipy import optimize
import matplotlib.pyplot as plt
import matplotlib.dates as pltdates
from astropy.io import ascii
from datetime import datetime
#This script will fit a power law to the moving source synchrotron spectrum
#The new data location
if sys.platform == 'darwin': BASEDIR='/Volumes/Transcend/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR='/mnt/MWA_DATA/'
avgperiod='1sec'
#datadir=BASEDIR+'max_spectra/normalized/'+avgperiod+'/'
datadir=BASEDIR+'synchrotron/subset/spectra_integrated/'
polarization='XX'
#sourcetype={'1':'Moving','2':'Stationary'}
sourcetype={'1':'Moving'} #Do not modify!
sourcetype={'2':'Stationary'}
#Read in the data
spectrafile='max2_XX_spectra_integrated.txt'
#frequencies=[79.8,88.76,97.72,107.96,119.48,132.28,145.08]
frequencies=np.array([88.76,97.72,107.96,119.48])
logx=np.log10(frequencies)
spectradata=ascii.read(datadir+spectrafile,data_start=1)
fluxdata=np.array(spectradata)
amps=[]
indices=[]
times=[]
indexerrors=[]
for ii,rval in enumerate(fluxdata):
if ii > 0:
#date,time,s1,e1,s2,e2,s3,e3,s4,e4,s5,e5,s6,e6=rval
date,time,s1,e1,s2,e2,s3,e3,s4,e4=rval
dt=date + ' '+time
times.append(datetime.strptime(dt,"%Y/%m/%d %H:%M:%S"))
#spectrum=np.array([s1,s2,s3,s4,s5,s6])
#yerr=np.array([e1,e2,e3,e4,e5,e6])+0.001
spectrum=np.array([s1,s2,s3,s4])
yerr=np.array([e1,e2,e3,e4])+0.001
logy=np.log10(spectrum)
logyerr = yerr / spectrum
p0 = [logy[0],1] # Initial guess for the parameters
fitfunc = lambda p, x: p[0] + p[1] * x # Target function
# Distance to the target function
#errfunc = lambda p, x, y: fitfunc(p, x) - y
#out = optimize.leastsq(errfunc, p0[:], args=(logx,logy), full_output=1)
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = optimize.leastsq(errfunc, p0[:], args=(logx,logy,logyerr), full_output=1)
p1 = out[0]
#print p1
covar = out[1]
index = p1[1]
indices.append(index)
amp = 10.0**p1[0]
amps.append(amp)
if covar is None: indexErr = 1.e-20
        else: indexErr = np.sqrt(covar[1][1])  # variance of the slope (the fitted index)
indexerrors.append(indexErr)
#print indices
#print indexerrors
#fit=amp*np.power(frequencies,index)
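# --- Illustrative sketch (not part of the original script): the per-spectrum fit
# above wrapped as a helper.  A power law S(f) = A * f**p is linear in log-log
# space, log10(S) = log10(A) + p*log10(f), so an error-weighted linear least-squares
# fit recovers A and p.  Function name is ours; inputs are 1-D numpy arrays.
def fit_powerlaw_loglog(freqs, flux, flux_err):
    lx, ly = np.log10(freqs), np.log10(flux)
    lyerr = flux_err / flux  # same convention as logyerr above
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
    pbest, covar = optimize.leastsq(errfunc, [ly[0], 1.0],
                                    args=(lx, ly, lyerr), full_output=1)[:2]
    amp, index = 10.0**pbest[0], pbest[1]
    index_err = np.sqrt(covar[1][1]) if covar is not None else np.nan
    return amp, index, index_err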
fig, ax = plt.subplots()
datefmt=pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times,y=indices,fmt='r-',drawstyle='steps-mid',linewidth=1)
plt.ylabel("Power law index")
plt.xlabel("Time of "+date)
plt.title("Integrated Moving Source Power Law Index")
plt.errorbar(times,indices,yerr=indexerrors,fmt='o',markersize=0.1,linewidth=1)
plt.savefig(datadir+'moving_source_spectral_indices'+'_'+polarization+"_synchrotron_integrated_"+avgperiod+".png")
plt.close()
#plt.plot(frequencies,fit,'b-')
#plt.plot(frequencies,spectrum,'ro')
#plt.step(frequencies,spectrum,color='r',where='mid')
#plt.yscale('log')
#plt.xscale('log')
#plt.show()
# Plot the inferred electron power law.
elecplawindices=list(2*np.array(indices)+1)
elecplawindiceserr=list(2*np.array(indexerrors))
fig, ax = plt.subplots()
datefmt=pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times,y=elecplawindices,fmt='r-',drawstyle='steps-mid',linewidth=1)
plt.ylabel("Inferred Electron Distribution Power Law Index")
plt.xlabel("Time of "+date)
plt.title("Moving Source Electron Distribution Power Law Index")
plt.errorbar(times,elecplawindices,yerr=elecplawindiceserr,fmt='o',markersize=0.1,linewidth=1)
plt.savefig(datadir+'normalized_moving_source_electron_distribution_indices'+'_'+polarization+"_synchrotron_"+avgperiod+".png")
plt.close()
# Plot the inferred degree of polarization.
plawind=np.array(elecplawindices)*(-1.)
degpol=list(((plawind+1.)/(plawind+(7./3.)))*100.)
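# Worked check (illustrative, not from the original script): for an electron
# power-law index p = 3 the standard synchrotron result gives a maximum linear
# polarization of (p + 1)/(p + 7/3) = 4/(16/3) = 75%.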
#degpolerr=list(2*np.array(indexerrors))
fig, ax = plt.subplots()
datefmt=pltdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(datefmt)
plt.plot_date(x=times,y=degpol,fmt='r-',drawstyle='steps-mid',linewidth=1)
plt.ylabel("Polarization Degree, %")
plt.xlabel("Time of "+date)
plt.title("Moving Source Inferred Polarization Degree")
#plt.errorbar(times,elecplawindices,yerr=degpolerr,fmt='o',markersize=0.1,linewidth=1)
plt.savefig(datadir+'normalized_moving_source_electron_polarization_degree'+'_'+polarization+"_synchrotron_"+avgperiod+".png")
plt.close()
|
gpl-2.0
|
haphaeu/yoshimi
|
spyder_workspace/Statistics/stats_playaounrd.py
|
2
|
10651
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 01 10:55:07 2015
@author: rarossi
"""
from scipy import stats
from matplotlib import pyplot as plt
import numpy as np
from numpy import (arange, sort, array, around, sqrt, log)
from scipy.stats import distributions
#from scipy import optimize
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
def anderson(x,dist='gumbel_l'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for Gumbel right and left skewed distributions.
This is a modified version of the anderson from scipy.stats.
This version only works for Gumbel distribution and implements
both gumbel_l and gumbel_r.
Parameters
----------
x : array_like
array of sample data
dist : {'gumbel_l','gumbel_r'}, optional
the type of distribution to test against. The default is 'gumbel_l'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
    I.e., the smaller A2, the better.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if not dist in ['gumbel_l', 'gumbel_r']:
raise ValueError("Invalid distribution; dist must be 'gumbel_l', "
"or 'gumbel_r'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'gumbel_l':
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
else: # (dist == 'gumbel_r')
xbar, s = distributions.gumbel_r.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_r.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
    S = np.sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])), axis=0)
A2 = -N-S
return A2, critical, sig
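# --- Illustrative sketch (not part of the original script): a quick self-check of
# anderson() on a synthetic left-skewed Gumbel sample; the statistic should usually
# stay below the 5% critical value.  The helper name and parameters are ours.
def _anderson_selfcheck(n=200, seed=0):
    rng = np.random.RandomState(seed)
    sample = stats.gumbel_l.rvs(loc=0.0, scale=2.0, size=n, random_state=rng)
    A2, critical, sig = anderson(sample, dist='gumbel_l')
    idx5 = list(sig).index(5)  # position of the 5% significance level
    return A2 < critical[idx5]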
def best_fit(tail):
"""Uses anderson() to find if the data fit better to a gumbel_l or gumbel_r distribution.
"""
if anderson(tail, 'gumbel_l')[0] < anderson(tail, 'gumbel_r')[0]:
return stats.gumbel_l
else:
return stats.gumbel_r
tailmax = [63.96574, 97.298988, 94.909447, 68.972389, 66.959663, 65.273926, 84.017792,
151.969101, 102.427635, 89.193542, 166.099564, 242.734909, 108.961304,
92.404419, 66.82309, 75.563385, 57.813889, 105.774223, 67.279755,
86.967712, 74.676308, 75.610916, 76.830391, 116.130943, 74.360512,
180.654968, 103.529533, 111.010529, 149.957794, 70.670921, 129.729172,
169.322662, 67.288033, 171.149643, 164.885376, 83.445435, 97.363678,
91.548149, 133.007599, 94.024841, 180.025314, 152.898285, 98.16478,
102.753448, 121.835167, 123.101646, 69.144646, 83.610756, 66.493416,
123.173607, 79.628456, 67.263359, 83.174103, 88.059792, 112.641327,
80.654366, 73.827545, 68.375336, 61.010174, 125.977417, 99.530197,
131.255432, 71.376617, 97.577362, 211.96611, 66.308861, 228.915405,
77.390526, 143.579269, 61.86153, 100.229225, 103.135757, 85.798904,
165.273346, 72.525803, 68.17765, 105.367897, 108.670799, 288.688293,
140.604355, 109.679535, 95.767075, 105.137451, 93.383766, 186.602676,
119.793755, 90.300858, 66.34745, 83.441994, 107.981186, 127.843536,
133.305923, 84.114273, 117.124664, 69.92189, 91.000229, 187.557693,
73.156982, 92.472664, 115.126579]
tailmin=[12.778043, 7.1879, 13.314878, 8.776149, 9.805628, 6.535013, 11.488866, 2.293265, 8.724171,
0.516041, 7.964844, 7.569489, 14.650825, 3.988805, 12.13273, 15.306945, 15.383864,
2.656031, 15.595515, 7.864027, 16.6761, 16.082186, 13.647097, 5.082881, 12.534531,
5.564847, 7.260636, 3.754782, 0.453892, 10.34429, 0.477659, 13.715585, 6.424349, 5.42458,
18.09203, 12.374338, 7.28268, 5.973233, 7.73395, 3.02955, 16.93425, 6.616414, 1.377988,
16.089392, 4.625199, 11.124841, 7.413293, 12.457375, 0.074295, 19.787035, 7.31939,
2.731352, 10.156926, 8.684792, 11.394233, 14.931318, 11.838491, 2.556698, 15.741388,
1.014213, 0.480564, 14.461552, 7.140567, 3.386644, 18.038301, 4.436148, 12.638147,
9.086492, 0.373772, 10.272347, 6.930659, 2.091561, 6.558409, 14.133745, 17.415342,
5.365997, 9.676363, 12.914423, 10.606887, 17.576532, 12.987781, 2.375166, 1.529086,
11.657972, 5.48551, 18.472898, 4.56945, 7.49713, 9.266244, 15.596133]
for tail in [tailmax, tailmin]:
plt.figure()
    plt.suptitle('TEST TITLE')
plt.subplot(221)
plt.hist(tail)
gumbel = best_fit(tail)
loc, scale = gumbel.fit(tail)
mygl = gumbel(loc=loc, scale=scale)
plt.subplot(222)
stats.probplot(tail,dist=mygl,plot=plt)
    plt.title('best fit gumbel')
loc, scale = stats.gumbel_l.fit(tail)
mygl = stats.gumbel_l(loc=loc, scale=scale)
plt.subplot(223)
stats.probplot(tail,dist=mygl,plot=plt)
    plt.title('gumbel l')
loc, scale = stats.gumbel_r.fit(tail)
mygr = stats.gumbel_r(loc=loc, scale=scale)
plt.subplot(224)
stats.probplot(tail,dist=mygr,plot=plt)
    plt.title('gumbel r')
#import pandas
#
##list with the path to various results files from repeated lowering analyses
#with open('list_results.txt', 'r') as pf:
# list_results = pf.readlines()
#
##write statistics to this file
#global_statistics_file = open('global_statistics.txt','w')
#global_statistics_file.write('File \t Total Cases \t Max \t Min \t Max_l \t Max_r \t ')
#global_statistics_file.write('Min_l \t Min_r \t Max_bad \t Min_bad\n')
#
#for resfile in list_results:
# print resfile
# global_statistics_file.write('%s \t ' % resfile[:-1])
# try:
# res = pandas.read_table(resfile[:-1])
# range_hs = set(res['WaveHs'])
# range_tp = set(res['WaveTp'])
# range_wd = set(res['WaveDirection'])
# counter = 0
# counter_max = 0
# counter_min = 0
# counter_max_bad = 0
# counter_min_bad = 0
# counter_max_r = 0
# counter_min_l = 0
# for wd in range_wd:
# res_wd = res[res['WaveDirection'] == wd]
# for hs in range_hs:
# res_hs = res_wd[res_wd['WaveHs'] == hs]
# for tp in range_tp:
# res_tp = res_hs[res_hs['WaveTp'] == tp]
# for col in res.columns[3:]:
# A2_l, critical_l, lvls_l = anderson(res_tp[col],'gumbel_l')
# A2_r, critical_r, lvls_r = anderson(res_tp[col],'gumbel_r')
# if False:
# print '%ddeg \t %.2fm \t %ds \t ' % (wd, hs, tp),
# print '{:<30s} \t '.format(col),
# print '%.2f \t %.2f \t ' % (min((A2_l, A2_r)), max(critical_l)),
# if A2_l < A2_r: print 'l \t ',
# else: print 'r \t ',
# if min((A2_l, A2_r)) > max(critical_r): print 'not ok'
# else: print ''
#
# counter += 1
# if col.lower().find('max') is not -1:
# counter_max += 1
# if min((A2_l, A2_r)) < max(critical_r):
# if A2_r < A2_l: counter_max_r += 1
# else: counter_max_bad += 1
# else:
# counter_min += 1
# if min((A2_l, A2_r)) < max(critical_r):
# if A2_l < A2_r: counter_min_l += 1
# else: counter_min_bad += 1
# except:
# global_statistics_file.write('fail\n')
# print 'fail'
# continue
#
# #write to a file
# global_statistics_file.write('%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n' %
# (counter, counter_max, counter_min,
# counter_max-counter_max_r-counter_max_bad, counter_max_r,
# counter_min_l, counter_min-counter_min_l-counter_min_bad,
# counter_max_bad, counter_min_bad))
# global_statistics_file.flush()
# #write to screen
# print 'Out of a total of %d cases, %d are max and %d are min' % (counter, counter_max, counter_min)
# print 'Max cases: %d fit gumbel_r, %d fit gumbel_l and %d didn\'t fit.' % (
# counter_max_r, counter_max-counter_max_r-counter_max_bad, counter_max_bad)
# print 'Min cases: %d fit gumbel_r, %d fit gumbel_l and %d didn\'t fit.' % (
# counter_min-counter_min_l-counter_min_bad, counter_min_l, counter_min_bad)
# print ''
# print ''
#
#global_statistics_file.close()
|
lgpl-3.0
|
treycausey/scikit-learn
|
sklearn/metrics/setup.py
|
299
|
1024
|
import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
manahl/arctic
|
tests/integration/scripts/test_copy_data.py
|
1
|
6522
|
import pytest
from mock import patch, call
from pandas.util.testing import assert_frame_equal
from arctic import arctic as m
from arctic.scripts import arctic_copy_data as mcd
from ...util import read_str_as_pandas, run_as_main
@pytest.fixture(scope='function', autouse=True)
def init(arctic):
arctic.initialize_library('user.library', m.VERSION_STORE, segment='month')
arctic.initialize_library('user.library2', m.VERSION_STORE, segment='month')
ts = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
ts1 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 4.0
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5
2012-11-08 17:06:11.040 | 7.0""")
ts2 = read_str_as_pandas(""" times | near
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5""")
ts3 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5
2012-11-08 17:06:11.040 | 3.0""")
def test_copy_data_no_force(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts1)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts)
# Create the user against the current mongo database
src_host = 'arctic_' + src + '@' + mongo_host
dest_host = 'arctic_' + dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', 'some_ts', 'some_ts1')
assert_frame_equal(ts, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in %s, use --force to overwrite or --splice to join with existing data' % dest_host)]
assert arctic[dest].read_audit_log('some_ts1')[0]['message'] == 'CR101'
def test_copy_data_force(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts1)
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '--force', 'some_ts', 'some_ts1')
assert_frame_equal(ts, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in destination, OVERWRITING')]
assert arctic[dest].read_audit_log('some_ts1')[0]['message'] == 'CR101'
def test_copy_data_splice(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts2)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts)
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '--splice', 'some_ts', 'some_ts1')
assert_frame_equal(ts3, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in destination, splicing in new data')]
assert arctic[dest].read_audit_log('some_ts')[0]['message'] == 'CR101'
def test_copy_data_wild(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_a_ts', ts)
arctic[src].write('some_a_ts1', ts1)
arctic[src].write('some_b_ts1', ts1)
arctic[src].write('some_c_ts1', ts1)
# Create the user against the current mongo database
src_host = 'arctic_' + src + '@' + mongo_host
dest_host = 'arctic_' + dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '.*_a_.*', '.*_b_.*')
assert_frame_equal(ts, arctic[dest].read('some_a_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_a_ts1').data)
assert_frame_equal(ts1, arctic[dest].read('some_b_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 3 symbols')]
assert arctic[dest].read_audit_log('some_a_ts1')[0]['message'] == 'CR101'
def test_copy_data_doesnt_exist(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', 'some_ts')
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 0 symbols')]
assert logger.warn.call_args_list == [call('No symbols found that matched those provided.')]
|
lgpl-2.1
|
tmladek/krocan-evaluator
|
evaluate.py
|
2
|
8938
|
#!/usr/bin/env python
import sys, os, csv, re, math
import matplotlib.pyplot as plt
from PyQt4.QtGui import QApplication, QDialog, QFileDialog, QMessageBox
from PyQt4.QtCore import QSettings
from ui_evaluator import Ui_Evaluator
def processFile(track):
frames = []
params = {}
with open(track, 'r') as f:
for line in f:
            if not re.match(r'^[ ]*(%|//)', line):
                frames.append(re.split("[ ]+", line))
            else:
                tmp = re.findall(r'%ArenaCenterXY.0 \( ([0-9.]+) [0-9.]+ \)', line)
                if tmp:
                    params["arena_x"] = float(tmp[0])
                tmp = re.findall(r'%ArenaCenterXY.0 \( [0-9.]+ ([0-9.]+) \)', line)
                if tmp:
                    params["arena_y"] = float(tmp[0])
                tmp = re.findall(r'ArenaDiameter_m.0 \(( [0-9.]+ )\)', line)
                if tmp:
                    params["diameter"] = float(tmp[0])
                tmp = re.findall(r'ReinforcedSector.0 \( ([0-9.]+) \)', line)
                if tmp:
                    params["shock_radius"] = float(tmp[0])
                tmp = re.findall(r'TrackerResolution_PixPerCM.0 \( ([0-9.]+) \)', line)
                if tmp:
                    params["pix_per_cm"] = float(tmp[0])
#todo: add a general solution
params["diameter"] *= 100*params["pix_per_cm"]
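    # ArenaDiameter_m is reported in metres; multiplying by 100*pix_per_cm above
    # converts it to pixels so it matches the coordinate columns in the frames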
return (frames, params)
def analyseTrack(frames, params):
    frames = [f for f in frames if f[2] != '0' and f[3] != '0']
entrances = 0
outside = True
for frame in frames:
if frame[5] != '0': # frame[5] = state
if outside:
outside = False
entrances += 1
else:
outside = True
distance = 0
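    # path length: sum Euclidean steps over every 5th frame (in pixels), then
    # convert to centimetres below using the tracker resolution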
for frame_pair in zip(frames[::5], frames[5::5]):
distance += math.hypot(float(frame_pair[0][2])-float(frame_pair[1][2]), float(frame_pair[0][3])-float(frame_pair[1][3]))
distance /= params["pix_per_cm"]
max_time_avoided = 0
outside = True
first_timestamp = frames[0][1]
for frame in frames:
if frame[5] != '0':
current_time_avoided = int(frame[1]) - int(first_timestamp)
if current_time_avoided > max_time_avoided:
max_time_avoided = current_time_avoided
outside = False
elif not outside:
outside = True
first_timestamp = frame[1]
time_first_entrance = 0
for frame in frames:
if frame[5] != '0':
time_first_entrance = frame[1]
break
shocks = 0
shocking = False
for frame in frames:
if frame[5] == '2':
if not shocking:
shocking = True
shocks += 1
else:
shocking = False
frames_in_centre = 0
inside = (params["diameter"]/2)/math.sqrt(2)
for frame in frames:
if math.hypot(float(frame[2])-params["arena_x"], float(frame[3])-params["arena_y"]) < inside:
frames_in_centre+=1
center_to_periphery = round(frames_in_centre/len(frames), 3)
return [ entrances, round(distance, 2), max_time_avoided, time_first_entrance, shocks, center_to_periphery ]
def renderGraphs_pair(tracks, params, filename):
arena_frame = plt.subplot2grid((1,2), (0,0))
arena_frame.set_title("Rat track [Arena frame]")
arena_frame.set_xlim([params["arena_x"]-params["diameter"]/2-5,params["arena_x"]+params["diameter"]/2+5])
arena_frame.set_ylim([params["arena_y"]-params["diameter"]/2-5,params["arena_y"]+params["diameter"]/2+5])
arena_frame.set_aspect('equal', adjustable='box')
arena_frame.axis('off')
arena_frame.add_artist(plt.Circle((params["arena_x"],params["arena_y"]),params["diameter"]/2,color='r',fill=False))
    arena_frame.plot([float(f[2]) for f in tracks[0] if f[2] != '0'], [float(f[3]) for f in tracks[0] if f[3] != '0'])
xvals = []
yvals = []
for i in range(min(len(tracks[0]), len(tracks[1]))):
# god forgive me
        if not ((tracks[0][i][2] == '0' and tracks[0][i][3] == '0') or (tracks[1][i][2] == '0' and tracks[1][i][3] == '0')):
xvals.append(float(tracks[0][i][2]) - float(tracks[1][i][2]))
yvals.append(float(tracks[0][i][3]) - float(tracks[1][i][3]))
robot_frame = plt.subplot2grid((1,2), (0,1))
robot_frame.set_title("Rat track [Robot frame]")
robot_frame.set_xlim(-params["diameter"], params["diameter"])
robot_frame.set_ylim(-params["diameter"], params["diameter"])
robot_frame.set_aspect('equal', adjustable='box')
robot_frame.axis('off')
robot_frame.add_artist(plt.Circle((0, 0), params["shock_radius"],color='y',fill=False))
robot_frame.plot(xvals, yvals)
# histogram = plt.subplot2grid((2,2), (1,0), colspan=2)
plt.savefig(filename)
def renderGraph (frames, params, filename):
fig, ax = plt.subplots()
ax.set_title("Track [Arena frame]")
ax.set_xlim([params["arena_x"]-params["diameter"]/2-5,params["arena_x"]+params["diameter"]/2+5])
ax.set_ylim([params["arena_y"]-params["diameter"]/2-5,params["arena_y"]+params["diameter"]/2+5])
ax.set_aspect('equal', adjustable='box')
ax.axis('off')
ax.add_artist(plt.Circle((params["arena_x"],params["arena_y"]),params["diameter"]/2,color='r',fill=False))
    ax.plot([float(f[2]) for f in frames if f[2] != '0'], [float(f[3]) for f in frames if f[3] != '0'])
plt.savefig(filename)
class KrocanEvaluator(QDialog, Ui_Evaluator):
files = []
settings = QSettings('FGU AV', 'Evaluator')
def __init__(self):
QDialog.__init__(self)
self.setupUi(self)
self.addDirButton.clicked.connect(self.addDirButtonClicked)
self.addButton.clicked.connect(self.addButtonClicked)
self.removeButton.clicked.connect(self.removeButtonClicked)
self.clearButton.clicked.connect(self.clearButtonClicked)
self.processButton.clicked.connect(self.processButtonClicked)
self.singleRadio.toggled.connect(self.updateUI)
self.show()
def addDirButtonClicked(self, _):
directory = QFileDialog.getExistingDirectory(self, "Directory with tracks", self.settings.value("lastLogs"))
if directory:
self.settings.setValue("lastLogs", directory)
logs = [directory+'/'+x for x in os.listdir(directory) if x[-4:] == ".dat"]
logs = sorted(logs, key=os.path.getmtime)
self.files += logs
self.updateUI()
def addButtonClicked(self, _):
selected_files = QFileDialog.getOpenFileNames(self, "Open tracks", self.settings.value("lastLogs"), "Logs (*.dat)")
if selected_files:
self.settings.setValue("lastLogs", "/".join(selected_files[0].split('/')[:-1]))
row = self.fileList.currentRow()
            if row != -1:
self.files[row+1:row+1] = selected_files
else:
self.files += selected_files
self.updateUI()
def removeButtonClicked(self, _):
row = self.fileList.currentRow()
        if row != -1:
self.files.pop(row)
self.updateUI()
def clearButtonClicked(self, _):
self.files.clear()
self.updateUI()
def processButtonClicked(self, _):
message = QMessageBox()
try:
output_dir = QFileDialog.getExistingDirectory(self, "Output directory", self.settings.value("lastOutput"))
if output_dir:
self.settings.setValue("lastOutput", output_dir)
else:
return
with open(output_dir+'/tracks.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow([ "Filename", "Entrances", "Distance", "Maximum Time Avoided", "Time to first entrance", "Shocks", "Time spent in center"])
for track in self.files:
writer.writerow([os.path.basename(str(track))] + analyseTrack(*processFile(track)))
if self.singleRadio.isChecked():
for track in self.files:
basename = ".".join(os.path.basename(str(track)).split('.')[:-1])
output_filename = str(output_dir)+'/'+basename+'.png'
frames, params = processFile(track)
renderGraph(frames, params, output_filename)
else:
for track_pair in zip(self.files[::2], self.files[1::2]):
rat_frames, params = processFile(track_pair[0])
robot_frames, _ = processFile(track_pair[1])
basename = ".".join(os.path.basename(str(track_pair[0])).split('.')[:-1])
output_filename = str(output_dir)+'/'+basename+'.png'
renderGraphs_pair((rat_frames, robot_frames), params, output_filename)
except:
message.setText("Error, processing failed!\n%s | %s" % (sys.exc_info()[0],sys.exc_info()[1]))
else:
message.setText("Processing successful!\nSaved into \"%s\"" % output_dir)
message.exec_()
def updateUI(self):
self.fileList.clear()
if len(self.files) > 0:
if self.singleRadio.isChecked():
self.processButton.setEnabled(True)
for file in self.files:
self.fileList.addItem("[---] %s" % file)
else:
self.processButton.setEnabled(len(self.files) % 2 == 0)
rat = True
for file in self.files:
self.fileList.addItem("[%s] %s" % ("RAT" if rat else "ROB", file))
rat = not rat
else:
self.processButton.setEnabled(False)
self.fileList.addItem("Add files...")
def main():
app = QApplication(sys.argv)
evaluator = KrocanEvaluator()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
gpl-2.0
|
olakiril/pipeline
|
python/pipeline/legacy/pre.py
|
6
|
5935
|
import sh
from scipy import ndimage
from warnings import warn
from sklearn.metrics import roc_curve
import datajoint as dj
from . import rf, trippy
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from pprint import pprint
import pandas as pd
import os
try:
import c2s
except:
    warn("c2s was not found. You won't be able to populate ExtractSpikes")
schema = dj.schema('pipeline_preprocessing', locals())
def normalize(img):
return (img - img.min())/(img.max()-img.min())
def bugfix_reshape(a):
return a.ravel(order='C').reshape(a.shape, order='F')
@schema
class SpikeInference(dj.Lookup):
definition = ...
def infer_spikes(self, X, dt, trace_name='ca_trace'):
assert self.fetch1['language'] == 'python', "This tuple cannot be computed in python."
fps = 1 / dt
spike_rates = []
N = len(X)
for i, trace in enumerate(X):
print('Predicting trace %i/%i' % (i+1,N))
trace['calcium'] = trace.pop(trace_name).T
trace['fps'] = fps
data = c2s.preprocess([trace], fps=fps)
data = c2s.predict(data, verbosity=0)
data[0]['spike_trace'] = data[0].pop('predictions').T
data[0].pop('calcium')
data[0].pop('fps')
spike_rates.append(data[0])
return spike_rates
@schema
class Spikes(dj.Computed):
definition = ...
def _make_tuples(self, key):
raise NotImplementedError("""This is an old style part table inherited from matlab.
        Call populate on dj.ExtractSpikes. This will call make_tuples on this class. Do not
        call make_tuples on pre.Spikes directly!
""")
def make_tuples(self, key):
dt = 1/(ScanInfo() & key).fetch1['fps']
X = (Trace() & key).project('ca_trace').fetch.as_dict()
X = (SpikeInference() & key).infer_spikes(X, dt)
for x in X:
self.insert1(dict(key, **x))
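# Sketch of the intended call pattern (illustrative; assumes a configured
# datajoint connection and populated upstream tables):
#   ExtractSpikes().populate()   # populates ExtractSpikes, which in turn calls
#                                # Spikes().make_tuples(key) for every new key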
@schema
class ExtractSpikes(dj.Computed):
definition = ...
@property
def populated_from(self):
# Segment and SpikeInference will be in the workspace if they are in the database
return ExtractTraces() * SpikeInference() & dict(language='python')
def _make_tuples(self, key):
self.insert1(key)
Spikes().make_tuples(key)
@schema
class Segment(dj.Imported):
definition = ...
def _make_tuples(self, key):
raise NotImplementedError('This table is populated from matlab')
@staticmethod
def reshape_masks(mask_pixels, mask_weights, px_height, px_width):
ret = np.zeros((px_height, px_width, len(mask_pixels)))
for i, (mp, mw) in enumerate(zip(mask_pixels, mask_weights)):
mask = np.zeros(px_height * px_width)
mask[mp.squeeze().astype(int) - 1] = mw.squeeze()
ret[..., i] = mask.reshape(px_height, px_width, order='F')
return ret
def mask_area_hists(self, outdir='./'):
# TODO: plot against firing rate once traces are repopulated
with sns.axes_style('ticks'):
fig, ax = plt.subplots()
for key in (self.project() * SegmentMethod() & dict(method_name='nmf')).fetch.as_dict:
area_per_pixel = np.prod((ScanInfo() & key).fetch1['um_width','um_height']) / \
np.prod((ScanInfo() & key).fetch1['px_width','px_height'])
areas = np.array([pxs*area_per_pixel for pxs in map(len, (SegmentMask() & key).fetch['mask_pixels'])])
ax.hist(areas, bins=20, alpha=.5, lw=0, label="A{animal_id}S{session}:{scan_idx}".format(**key))
ax.legend()
sns.despine(fig)
plt.show()
def plot_NMF_ROIs(self, outdir='./'):
sns.set_context('paper')
theCM = sns.blend_palette(['lime', 'gold', 'deeppink'], n_colors=10) # plt.cm.RdBu_r
for key in (self.project() * SegmentMethod()*SpikeInference() & dict(short_name='stm', method_name='nmf')).fetch.as_dict:
mask_px, mask_w, ca, sp = (SegmentMask()*Trace()*Spikes() & key).fetch.order_by('mask_id')['mask_pixels', 'mask_weights', 'ca_trace', 'spike_trace']
template = np.stack([normalize(bugfix_reshape(t)[..., key['slice']-1].squeeze())
for t in (ScanCheck() & key).fetch['template']], axis=2).mean(axis=2) # TODO: remove bugfix_reshape once djbug #191 is fixed
d1, d2 = tuple(map(int, (ScanInfo() & key).fetch1['px_height', 'px_width']))
masks = Segment.reshape_masks(mask_px, mask_w, d1, d2)
gs = plt.GridSpec(6,1)
try:
sh.mkdir('-p', os.path.expanduser(outdir) + '/scan_idx{scan_idx}/slice{slice}'.format(**key))
except:
pass
for cell, (ca_trace, sp_trace) in enumerate(zip(ca, sp)):
with sns.axes_style('white'):
fig = plt.figure(figsize=(6,8))
ax_image = fig.add_subplot(gs[2:,:])
with sns.axes_style('ticks'):
ax_ca = fig.add_subplot(gs[0,:])
ax_sp = fig.add_subplot(gs[1,:], sharex=ax_ca)
ax_ca.plot(ca_trace,'green', lw=1)
ax_sp.plot(sp_trace,'k',lw=1)
ax_image.imshow(template, cmap=plt.cm.gray)
ax_image.contour(masks[..., cell], colors=theCM, zorder=10 )
sns.despine(ax=ax_ca)
sns.despine(ax=ax_sp)
ax_ca.axis('tight')
ax_sp.axis('tight')
fig.suptitle("animal_id {animal_id}:session {session}:scan_idx {scan_idx}:{method_name}:slice{slice}:cell{cell}".format(cell=cell+1, **key))
fig.tight_layout()
plt.savefig(outdir + "/scan_idx{scan_idx}/slice{slice}/cell{cell:03d}_animal_id_{animal_id}_session_{session}.png".format(cell=cell+1, **key))
plt.close(fig)
schema.spawn_missing_classes()
|
lgpl-3.0
|
aequitas/home-assistant
|
homeassistant/components/smappee/__init__.py
|
6
|
12597
|
"""Support for Smappee energy monitor."""
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
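# e.g. a report fragment such as "voltage=230.0" is matched as key='voltage',
# value='230.0' (the value here is illustrative; the keys read below are
# 'current' and 'voltage')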
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
    """Set up the Smappee component."""
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username,
password, host, host_password)
if not smappee.is_local_active and not smappee.is_remote_active:
        _LOGGER.error("Neither the Smappee server nor the local component is enabled.")
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN, {}, config)
load_platform(hass, 'sensor', DOMAIN, {}, config)
return True
class Smappee:
"""Stores data retrieved from Smappee sensor."""
def __init__(self, client_id, client_secret, username,
password, host, host_password):
"""Initialize the data."""
import smappy
self._remote_active = False
self._local_active = False
if client_id is not None:
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception(
"Smappee server authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee server component init skipped.")
if host is not None:
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception(
"Local Smappee device authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee local component init skipped.")
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if self._remote_active or self._local_active:
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update data from Smappee API."""
if self.is_remote_active:
service_locations = self._smappy.get_service_locations() \
.get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if location_id is not None:
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy \
.get_service_location_info(location_id)
_LOGGER.debug("Remote info %s %s",
self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id]\
.update({sensor_id: self.get_sensor_consumption(
location_id, sensor_id,
aggregation=3, delta=1440)})
_LOGGER.debug("Remote sensors %s %s",
self.locations,
self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(
location_id, aggregation=3, delta=1440)
_LOGGER.debug("Remote consumption %s %s",
self.locations,
self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug("Local switches %s", self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug("Local values %s", self.instantaneous)
@property
    def is_remote_active(self):
        """Return true if the Smappee server is configured and working."""
return self._remote_active
@property
    def is_local_active(self):
        """Return true if the local Smappee device is configured and working."""
return self._local_active
def get_switches(self):
"""Get switches from local Smappee."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error(
"Error getting switches from local Smappee. (%s)",
error)
def get_consumption(self, location_id, aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
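        # Example: update() requests daily totals for the last 24 hours via
        # get_consumption(location_id, aggregation=3, delta=1440)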
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_consumption(location_id,
start,
end,
aggregation)
except RequestException as error:
            _LOGGER.error(
                "Error getting consumption from Smappee cloud. (%s)",
error)
def get_sensor_consumption(self, location_id, sensor_id,
aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_sensor_consumption(location_id,
sensor_id,
start,
end, aggregation)
except RequestException as error:
            _LOGGER.error(
                "Error getting consumption from Smappee cloud. (%s)",
error)
def actuator_on(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn on actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
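        # Illustrative call (values from the comment above): switching a remote
        # actuator on for 15 minutes would be
        # actuator_on(location_id, actuator_id, is_remote_switch=True, duration=900)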
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def actuator_off(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn off actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
            _LOGGER.error(
                "Error turning actuator off. (%s)",
error)
return False
return True
def active_power(self):
"""Get sum of all instantaneous active power values from local hub."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def active_cosfi(self):
"""Get the average of all instantaneous cosfi values."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def instantaneous_values(self):
"""ReportInstantaneousValues."""
if not self.is_local_active:
return
report_instantaneous_values = \
self._localsmappy.report_instantaneous_values()
report_result = \
report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = \
match.group('value')
_LOGGER.debug(properties)
return properties
def active_current(self):
"""Get current active Amps."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['current'])
def active_voltage(self):
"""Get current active Voltage."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['voltage'])
def load_instantaneous(self):
"""LoadInstantaneous."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_instantaneous()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
|
apache-2.0
|
drusk/pml
|
test/test_pml/test_supervised/test_decision_trees.py
|
1
|
6613
|
# Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Unit tests for decision_trees module.
Examples from:
http://www.doc.ic.ac.uk/~sgc/teaching/pre2012/v231/lecture11.html
and
http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/mlbook/ch3.pdf
NOTE: weekends.data has been modified slightly: w5 parents switched to 'yes'
in order to make money unambiguously the best choice for splitting after
weather on the rainy branch.
@author: drusk
"""
import unittest
import pandas as pd
from hamcrest import assert_that, equal_to
from pml.supervised.decision_trees import id3
from pml.supervised.decision_trees import DecisionTree
from pml.data.loader import load
from pml.data.model import DataSet
from test import base_tests
from test.matchers.pml_matchers import equals_tree
from test.matchers.pandas_matchers import equals_series
class DecisionTreesTest(base_tests.BaseFileLoadingTest):
def test_id3_choose_feature_to_split(self):
data = load(self.relative_to_base("/datasets/weekends.data"))
root = id3.choose_feature_to_split(data)
self.assertEqual(root, "weather")
def test_id3_build_tree_marine_animals(self):
dataset = load(self.relative_to_base("/datasets/marine_animal.data"))
tree = id3.build_tree(dataset)
assert_that(tree,
equals_tree(
{"no_surfacing": {
"no": "no",
"yes": {
"has_flippers": {
"no": "no",
"yes": "yes"
}
}
}
}
)
)
def test_id3_build_tree_weekends(self):
dataset = load(self.relative_to_base("/datasets/weekends.data"))
tree = id3.build_tree(dataset)
assert_that(tree,
equals_tree(
{"weather": {
"sunny": {
"parents": {
"yes": "cinema",
"no": "tennis"
}
},
"windy": {
"parents": {
"yes": "cinema",
"no": {
"money": {
"rich": "shopping",
"poor": "cinema"
}
}
}
},
"rainy": {
"money": {
"poor": "cinema",
"rich": "stay in"
}
}
}}
)
)
def test_id3_build_tree_play_tennis(self):
dataset = load(self.relative_to_base("/datasets/play_tennis.data"),
delimiter=" ")
tree = id3.build_tree(dataset)
assert_that(tree,
equals_tree(
{"Outlook": {
"Sunny": {
"Humidity": {
"High": "No",
"Normal": "Yes"
}
},
"Overcast": "Yes",
"Rain": {
"Wind": {
"Strong": "No",
"Weak": "Yes"
}
}
}}
)
)
def test_classify_play_tennis(self):
training = load(self.relative_to_base("/datasets/play_tennis.data"),
delimiter=" ")
classifier = DecisionTree(training)
sample = pd.Series(["Rain", "Cool", "High", "Strong"],
index=['Outlook', 'Temperature', 'Humidity',
'Wind'])
self.assertEqual(classifier.classify(sample), "No")
def test_classify_weekends(self):
training = load(self.relative_to_base("/datasets/weekends.data"))
classifier = DecisionTree(training)
sample = pd.Series(["windy", "no", "rich"],
index=['weather', 'parents', 'money'])
self.assertEqual(classifier.classify(sample), "shopping")
def test_classify_all_weekends(self):
training = load(self.relative_to_base("/datasets/weekends.data"))
classifier = DecisionTree(training)
index = ['weather', 'parents', 'money']
sample_0 = pd.Series(["windy", "no", "rich"], index=index)
sample_1 = pd.Series(["sunny", "yes", "rich"], index=index)
results = classifier.classify_all(
DataSet(pd.DataFrame([sample_0, sample_1])))
assert_that(results.get_classifications(),
equals_series({0: "shopping", 1: "cinema"}))
def test_data_has_value_not_in_training(self):
training = load(self.relative_to_base("/datasets/play_tennis.data"),
delimiter=" ")
classifier = DecisionTree(training)
# NOTE: Snowing is not a value of Outlook seen in the training set
sample = pd.Series({"Outlook": "Snowing", "Temperature": "Cool",
"Humidity": "Normal", "Wind": "Strong"})
assert_that(classifier.classify(sample), equal_to("Yes"))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mit
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/tseries/tests/test_tslib.py
|
7
|
66821
|
import nose
from distutils.version import LooseVersion
import numpy as np
from pandas import tslib, lib
import pandas._period as period
import datetime
import pandas as pd
from pandas.core.api import (Timestamp, Index, Series, Timedelta, Period,
to_datetime)
from pandas.tslib import get_timezone
from pandas._period import period_asfreq, period_ordinal
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.frequencies import (
get_freq,
US_RESO, MS_RESO, S_RESO, H_RESO, D_RESO, T_RESO
)
import pandas.tseries.tools as tools
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
import pandas.compat as compat
from pandas.compat.numpy import (np_datetime64_compat,
np_array_datetime64_compat)
from pandas.util.testing import assert_series_equal, _skip_if_has_locale
class TestTsUtil(tm.TestCase):
def test_try_parse_dates(self):
from dateutil.parser import parse
arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
result = lib.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
self.assertTrue(np.array_equal(result, expected))
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
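        # (dividing both nanosecond values by 1000 below discards the
        # sub-microsecond part before comparing)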
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
self.assertEqual(
Timestamp(Timestamp.max.to_pydatetime()).value / 1000,
Timestamp.max.value / 1000)
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
self.assertEqual(
Timestamp(Timestamp.min.to_pydatetime()).value / 1000,
Timestamp.min.value / 1000)
class TestTimestamp(tm.TestCase):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000,
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime.datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000,
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
                # re-creation shouldn't affect the internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
self.assertEqual(result.value, Timestamp("2015-11-18 10:00").value)
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
self.assertEqual(result.value, Timestamp("2015-11-18 10:00").value)
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
def test_constructor_invalid(self):
with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_positional(self):
# GH 10758
with tm.assertRaises(TypeError):
Timestamp(2000, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 0, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 13, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 1, 0)
with tm.assertRaises(ValueError):
Timestamp(2000, 1, 32)
# GH 11630
self.assertEqual(
repr(Timestamp(2015, 11, 12)),
repr(Timestamp('20151112')))
self.assertEqual(
repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)),
repr(Timestamp('2015-11-12 01:02:03.999999')))
self.assertIs(Timestamp(None), pd.NaT)
def test_constructor_keyword(self):
# GH 10758
with tm.assertRaises(TypeError):
Timestamp(year=2000, month=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=0, day=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=13, day=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=1, day=0)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=1, day=32)
self.assertEqual(
repr(Timestamp(year=2015, month=11, day=12)),
repr(Timestamp('20151112')))
self.assertEqual(
repr(Timestamp(year=2015, month=11, day=12,
hour=1, minute=2, second=3, microsecond=999999)),
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime.datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
self.assertEqual(base, ts)
self.assertEqual(ts.freq, 'D')
self.assertEqual(base.toordinal(), ts.toordinal())
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
self.assertEqual(pd.Timestamp('2000-01-01', tz='US/Eastern'), ts)
self.assertEqual(base.toordinal(), ts.toordinal())
def test_constructor_offset_depr(self):
# GH 12160
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = Timestamp('2011-01-01', offset='D')
self.assertEqual(ts.freq, 'D')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.assertEqual(ts.offset, 'D')
msg = "Can only specify freq or offset, not both"
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01', offset='D', freq='D')
def test_constructor_offset_depr_fromordinal(self):
# GH 12160
base = datetime.datetime(2000, 1, 1)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = Timestamp.fromordinal(base.toordinal(), offset='D')
self.assertEqual(pd.Timestamp('2000-01-01'), ts)
self.assertEqual(ts.freq, 'D')
self.assertEqual(base.toordinal(), ts.toordinal())
msg = "Can only specify freq or offset, not both"
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp.fromordinal(base.toordinal(), offset='D', freq='D')
def test_conversion(self):
# GH 9255
ts = Timestamp('2000-01-01')
result = ts.to_pydatetime()
expected = datetime.datetime(2000, 1, 1)
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
result = ts.to_datetime64()
expected = np.datetime64(ts.value, 'ns')
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
self.assertEqual(result.dtype, expected.dtype)
def test_repr(self):
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
dates = ['2014-03-07', '2014-01-01 09:00',
'2014-01-01 00:00:00.000000001']
# dateutil zone change (only matters for repr)
import dateutil
if (dateutil.__version__ >= LooseVersion('2.3') and
(dateutil.__version__ <= LooseVersion('2.4.0') or
dateutil.__version__ >= LooseVersion('2.6.0'))):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Pacific']
else:
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/America/Los_Angeles']
freqs = ['D', 'M', 'S', 'N']
for date in dates:
for tz in timezones:
for freq in freqs:
                    # avoid matching the timezone name
freq_repr = "'{0}'".format(freq)
if tz.startswith('dateutil'):
tz_repr = tz.replace('dateutil', '')
else:
tz_repr = tz
date_only = Timestamp(date)
self.assertIn(date, repr(date_only))
self.assertNotIn(tz_repr, repr(date_only))
self.assertNotIn(freq_repr, repr(date_only))
self.assertEqual(date_only, eval(repr(date_only)))
date_tz = Timestamp(date, tz=tz)
self.assertIn(date, repr(date_tz))
self.assertIn(tz_repr, repr(date_tz))
self.assertNotIn(freq_repr, repr(date_tz))
self.assertEqual(date_tz, eval(repr(date_tz)))
date_freq = Timestamp(date, freq=freq)
self.assertIn(date, repr(date_freq))
self.assertNotIn(tz_repr, repr(date_freq))
self.assertIn(freq_repr, repr(date_freq))
self.assertEqual(date_freq, eval(repr(date_freq)))
date_tz_freq = Timestamp(date, tz=tz, freq=freq)
self.assertIn(date, repr(date_tz_freq))
self.assertIn(tz_repr, repr(date_tz_freq))
self.assertIn(freq_repr, repr(date_tz_freq))
self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
        # this can cause the tz field to be populated, but it's redundant
        # with the information in the datestring
tm._skip_if_no_pytz()
import pytz # noqa
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))
self.assertNotIn('tzoffset', repr(date_with_utc_offset))
self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))
expr = repr(date_with_utc_offset).replace("'pytz.FixedOffset(-240)'",
'pytz.FixedOffset(-240)')
self.assertEqual(date_with_utc_offset, eval(expr))
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12', )
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
self.assertRaises(ValueError, Timestamp, np.datetime64(
date_string, dtype='M8[%s]' % unit))
in_bounds_dates = ('1677-09-23', '2262-04-11', )
for date_string in in_bounds_dates:
for unit in time_units:
Timestamp(np.datetime64(date_string, dtype='M8[%s]' % unit))
def test_tz(self):
t = '2014-02-01 09:00'
ts = Timestamp(t)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo'))
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv, Timestamp('2014-01-31 19:00', tz='US/Eastern'))
self.assertEqual(conv.hour, 19)
# preserves nanosecond
ts = Timestamp(t) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local.nanosecond, 5)
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
self.assertEqual(rng[1], ts_dst)
self.assertEqual(rng[2], ts_no_dst)
self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern',
ambiguous='infer')
# GH 8025
with tm.assertRaisesRegexp(TypeError,
'Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions'):
Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assertRaisesRegexp(TypeError,
'Cannot convert tz-naive Timestamp, use '
'tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
def test_tz_localize_nonexistent(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
times = ['2015-03-08 02:00', '2015-03-08 02:30',
'2015-03-29 02:00', '2015-03-29 02:30']
timezones = ['US/Eastern', 'US/Pacific',
'Europe/Paris', 'Europe/Belgrade']
for t, tz in zip(times, timezones):
ts = Timestamp(t)
self.assertRaises(NonExistentTimeError, ts.tz_localize,
tz)
self.assertRaises(NonExistentTimeError, ts.tz_localize,
tz, errors='raise')
self.assertIs(ts.tz_localize(tz, errors='coerce'),
pd.NaT)
def test_tz_localize_errors_ambiguous(self):
# See issue 13057
from pytz.exceptions import AmbiguousTimeError
ts = pd.Timestamp('2015-11-1 01:00')
self.assertRaises(AmbiguousTimeError,
ts.tz_localize, 'US/Pacific', errors='coerce')
def test_tz_localize_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00']:
ts = Timestamp(t)
localized = ts.tz_localize(tz)
self.assertEqual(localized, Timestamp(t, tz=tz))
with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
self.assertEqual(reset, ts)
self.assertTrue(reset.tzinfo is None)
def test_tz_convert_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00']:
ts = Timestamp(t, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
self.assertEqual(reset, Timestamp(t))
self.assertTrue(reset.tzinfo is None)
self.assertEqual(reset,
converted.tz_convert('UTC').tz_localize(None))
def test_barely_oob_dts(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
self.assertRaises(ValueError, Timestamp, min_ts_us - one_us)
# One us more than the maximum is an error
self.assertRaises(ValueError, Timestamp, max_ts_us + one_us)
def test_utc_z_designator(self):
self.assertEqual(get_timezone(
Timestamp('2014-11-02 01:00Z').tzinfo), 'UTC')
def test_now(self):
# #9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000, ]
for n in ns:
self.assertEqual(Timestamp(n).asm8.view('i8'),
np.datetime64(n, 'ns').view('i8'), n)
self.assertEqual(Timestamp('nat').asm8.view('i8'),
np.datetime64('nat', 'ns').view('i8'))
def test_fields(self):
def check(value, equal):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
self.assertEqual(value, equal)
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
self.assertRaises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
        check(ts.days_in_month, 31)
def test_nat_fields(self):
# GH 10050
ts = Timestamp('NaT')
self.assertTrue(np.isnan(ts.year))
self.assertTrue(np.isnan(ts.month))
self.assertTrue(np.isnan(ts.day))
self.assertTrue(np.isnan(ts.hour))
self.assertTrue(np.isnan(ts.minute))
self.assertTrue(np.isnan(ts.second))
self.assertTrue(np.isnan(ts.microsecond))
self.assertTrue(np.isnan(ts.nanosecond))
self.assertTrue(np.isnan(ts.dayofweek))
self.assertTrue(np.isnan(ts.quarter))
self.assertTrue(np.isnan(ts.dayofyear))
self.assertTrue(np.isnan(ts.week))
self.assertTrue(np.isnan(ts.daysinmonth))
self.assertTrue(np.isnan(ts.days_in_month))
def test_pprint(self):
# GH12622
import pprint
nested_obj = {'foo': 1,
'bar': [{'w': {'a': Timestamp('2011-01-01')}}] * 10}
result = pprint.pformat(nested_obj, width=50)
expected = r"""{'bar': [{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}}],
'foo': 1}"""
self.assertEqual(result, expected)
    def test_to_datetime_depr(self):
# see gh-8254
ts = Timestamp('2011-01-01')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = datetime.datetime(2011, 1, 1)
result = ts.to_datetime()
self.assertEqual(result, expected)
    def test_to_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime.datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
self.assertEqual(result, expected)
class TestDatetimeParsingWrappers(tm.TestCase):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
for bad_date_string in bad_date_strings:
self.assertFalse(tslib._does_string_look_like_datetime(
bad_date_string))
good_date_strings = ('2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1', )
for good_date_string in good_date_strings:
self.assertTrue(tslib._does_string_look_like_datetime(
good_date_string))
def test_parsers(self):
# https://github.com/dateutil/dateutil/issues/217
import dateutil
yearfirst = dateutil.__version__ >= LooseVersion('2.5.0')
cases = {'2011-01-01': datetime.datetime(2011, 1, 1),
'2Q2005': datetime.datetime(2005, 4, 1),
'2Q05': datetime.datetime(2005, 4, 1),
'2005Q1': datetime.datetime(2005, 1, 1),
'05Q1': datetime.datetime(2005, 1, 1),
'2011Q3': datetime.datetime(2011, 7, 1),
'11Q3': datetime.datetime(2011, 7, 1),
'3Q2011': datetime.datetime(2011, 7, 1),
'3Q11': datetime.datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime.datetime(2000, 10, 1),
'00Q4': datetime.datetime(2000, 10, 1),
'4Q2000': datetime.datetime(2000, 10, 1),
'4Q00': datetime.datetime(2000, 10, 1),
'2000q4': datetime.datetime(2000, 10, 1),
'2000-Q4': datetime.datetime(2000, 10, 1),
'00-Q4': datetime.datetime(2000, 10, 1),
'4Q-2000': datetime.datetime(2000, 10, 1),
'4Q-00': datetime.datetime(2000, 10, 1),
'00q4': datetime.datetime(2000, 10, 1),
'2005': datetime.datetime(2005, 1, 1),
'2005-11': datetime.datetime(2005, 11, 1),
'2005 11': datetime.datetime(2005, 11, 1),
'11-2005': datetime.datetime(2005, 11, 1),
'11 2005': datetime.datetime(2005, 11, 1),
'200511': datetime.datetime(2020, 5, 11),
'20051109': datetime.datetime(2005, 11, 9),
'20051109 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime.datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime.datetime(2003, 9, 25, 10,
36, 28),
"Thu Sep 25 2003": datetime.datetime(2003, 9, 25),
"Sep 25 2003": datetime.datetime(2003, 9, 25),
"January 1 2014": datetime.datetime(2014, 1, 1),
# GH 10537
'2014-06': datetime.datetime(2014, 6, 1),
'06-2014': datetime.datetime(2014, 6, 1),
'2014-6': datetime.datetime(2014, 6, 1),
'6-2014': datetime.datetime(2014, 6, 1),
'20010101 12': datetime.datetime(2001, 1, 1, 12),
'20010101 1234': datetime.datetime(2001, 1, 1, 12, 34),
'20010101 123456': datetime.datetime(2001, 1, 1, 12, 34, 56),
}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str,
yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(np.array([date_str], dtype=object),
yearfirst=yearfirst)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
self.assertEqual(res, expected)
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
            # these really need to have yearfirst, but we don't support it
if not yearfirst:
result5 = Timestamp(date_str)
self.assertEqual(result5, expected)
result7 = date_range(date_str, freq='S', periods=1,
yearfirst=yearfirst)
self.assertEqual(result7, expected)
# NaT
result1, _, _ = tools.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
        self.assertTrue(result1 is tslib.NaT)
        self.assertTrue(result2 is tslib.NaT)
        self.assertTrue(result3 is tslib.NaT)
        self.assertTrue(result4 is tslib.NaT)
def test_parsers_quarter_invalid(self):
cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.']
for case in cases:
self.assertRaises(ValueError, tools.parse_time_string, case)
def test_parsers_dayfirst_yearfirst(self):
tm._skip_if_no_dateutil()
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
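        # e.g. on dateutil >= 2.5.3, '10-11-12' with dayfirst=True and
        # yearfirst=True parses as 2010-12-11 (see the cases dict below)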
import dateutil
is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False,
datetime.datetime(2012, 10, 11)),
(True, False,
datetime.datetime(2012, 11, 10)),
(False, True,
datetime.datetime(2010, 11, 12)),
(True, True,
datetime.datetime(2010, 12, 11))],
'20/12/21': [(False, False,
datetime.datetime(2021, 12, 20)),
(True, False,
datetime.datetime(2021, 12, 20)),
(False, True,
datetime.datetime(2020, 12, 21)),
(True, True,
datetime.datetime(2020, 12, 21))]}
from dateutil.parser import parse
for date_str, values in compat.iteritems(cases):
for dayfirst, yearfirst, expected in values:
# odd comparisons across versions
# let's just skip
if dayfirst and yearfirst and is_lt_253:
continue
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
self.assertEqual(dateutil_result, expected)
result1, _, _ = tools.parse_time_string(date_str,
dayfirst=dayfirst,
yearfirst=yearfirst)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
self.assertEqual(result2, expected)
result3 = to_datetime(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
yearfirst=yearfirst)[0]
self.assertEqual(result1, expected)
self.assertEqual(result3, expected)
self.assertEqual(result4, expected)
def test_parsers_timestring(self):
tm._skip_if_no_dateutil()
from dateutil.parser import parse
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime.datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime.datetime(1, 1, 1, 9, 5))}
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
# parse_time_string returns a datetime based on the default date
# (datetime(1, 1, 1)); the others are based on 'now'. This cannot be
# changed because it is relied upon by time series plotting
self.assertEqual(result1, exp_def)
self.assertEqual(result2, exp_now)
self.assertEqual(result3, exp_now)
self.assertEqual(result4, exp_now)
self.assertEqual(result5, exp_now)
def test_parsers_time(self):
# GH11818
_skip_if_has_locale()
strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
"2:15:00pm", "021500pm", datetime.time(14, 15)]
expected = datetime.time(14, 15)
for time_string in strings:
self.assertEqual(tools.to_time(time_string), expected)
new_string = "14.15"
self.assertRaises(ValueError, tools.to_time, new_string)
self.assertEqual(tools.to_time(new_string, format="%H.%M"), expected)
arg = ["14:15", "20:20"]
expected_arr = [datetime.time(14, 15), datetime.time(20, 20)]
self.assertEqual(tools.to_time(arg), expected_arr)
self.assertEqual(tools.to_time(arg, format="%H:%M"), expected_arr)
self.assertEqual(tools.to_time(arg, infer_time_format=True),
expected_arr)
self.assertEqual(tools.to_time(arg, format="%I:%M%p", errors="coerce"),
[None, None])
res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
self.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
with tm.assertRaises(ValueError):
tools.to_time(arg, format="%I:%M%p", errors="raise")
self.assert_series_equal(tools.to_time(Series(arg, name="test")),
Series(expected_arr, name="test"))
res = tools.to_time(np.array(arg))
self.assertIsInstance(res, list)
self.assert_equal(res, expected_arr)
def test_parsers_monthfreq(self):
cases = {'201101': datetime.datetime(2011, 1, 1, 0, 0),
'200005': datetime.datetime(2000, 5, 1, 0, 0)}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str, freq='M')
self.assertEqual(result1, expected)
def test_parsers_quarterly_with_freq(self):
msg = ('Incorrect quarterly string is given, quarter '
'must be between 1 and 4: 2013Q5')
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q5')
# GH 5418
msg = ('Unable to retrieve month information from given freq: '
'INVLD-L-DEC-SAT')
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
cases = {('2013Q2', None): datetime.datetime(2013, 4, 1),
('2013Q2', 'A-APR'): datetime.datetime(2012, 8, 1),
('2013-Q2', 'A-DEC'): datetime.datetime(2013, 4, 1)}
for (date_str, freq), exp in compat.iteritems(cases):
result, _, _ = tools.parse_time_string(date_str, freq=freq)
self.assertEqual(result, exp)
def test_parsers_timezone_minute_offsets_roundtrip(self):
# GH11708
base = to_datetime("2013-01-01 00:00:00")
dt_strings = [
('2013-01-01 05:45+0545',
"Asia/Katmandu",
"Timestamp('2013-01-01 05:45:00+0545', tz='Asia/Katmandu')"),
('2013-01-01 05:30+0530',
"Asia/Kolkata",
"Timestamp('2013-01-01 05:30:00+0530', tz='Asia/Kolkata')")
]
for dt_string, tz, dt_string_repr in dt_strings:
dt_time = to_datetime(dt_string)
self.assertEqual(base, dt_time)
converted_time = dt_time.tz_localize('UTC').tz_convert(tz)
self.assertEqual(dt_string_repr, repr(converted_time))
def test_parsers_iso8601(self):
# GH 12060
# test only the iso parser - flexibility to different
# separators and leading 0s
# Timestamp construction falls back to dateutil
cases = {'2011-01-02': datetime.datetime(2011, 1, 2),
'2011-1-2': datetime.datetime(2011, 1, 2),
'2011-01': datetime.datetime(2011, 1, 1),
'2011-1': datetime.datetime(2011, 1, 1),
'2011 01 02': datetime.datetime(2011, 1, 2),
'2011.01.02': datetime.datetime(2011, 1, 2),
'2011/01/02': datetime.datetime(2011, 1, 2),
'2011\\01\\02': datetime.datetime(2011, 1, 2),
'2013-01-01 05:30:00': datetime.datetime(2013, 1, 1, 5, 30),
'2013-1-1 5:30:00': datetime.datetime(2013, 1, 1, 5, 30)}
for date_str, exp in compat.iteritems(cases):
actual = tslib._test_parse_iso8601(date_str)
self.assertEqual(actual, exp)
# separators must all match - YYYYMM not valid
invalid_cases = ['2011-01/02', '2011^11^11',
'201401', '201111', '200101',
# mixed separated and unseparated
'2005-0101', '200501-01',
'20010101 12:3456', '20010101 1234:56',
# HHMMSS must have two digits in each component
# if unseparated
'20010101 1', '20010101 123', '20010101 12345',
'20010101 12345Z',
# wrong separator for HHMMSS
'2001-01-01 12-34-56']
for date_str in invalid_cases:
with tm.assertRaises(ValueError):
tslib._test_parse_iso8601(date_str)
# If no ValueError raised, let me know which case failed.
raise Exception(date_str)
class TestArrayToDatetime(tm.TestCase):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_number_looking_strings_not_into_datetime(self):
# #4601
# These strings don't look like datetimes, so they should not be
# converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
datetime.date(1000, 1, 1),
datetime.datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01'),
]
for invalid_date in invalid_dates:
self.assertRaises(ValueError,
tslib.array_to_datetime,
np.array(
[invalid_date], dtype='object'),
errors='raise', )
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([invalid_date], dtype='object'),
errors='coerce'),
np.array([tslib.iNaT], dtype='M8[ns]')
)
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
# With coercing, the invalid dates become iNaT
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT
],
dtype='M8[ns]'
)
)
def test_parsing_timezone_offsets(self):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
dt_strings = [
'01-01-2013 08:00:00+08:00',
'2013-01-01T08:00:00.000000000+0800',
'2012-12-31T16:00:00.000000000-0800',
'12-31-2012 23:00:00-01:00'
]
expected_output = tslib.array_to_datetime(np.array(
['01-01-2013 00:00:00'], dtype=object))
for dt_string in dt_strings:
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([dt_string], dtype=object)
),
expected_output
)
class TestTimestampNsOperations(tm.TestCase):
def setUp(self):
self.timestamp = Timestamp(datetime.datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
self.assertEqual(modified_value - value, expected_value)
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
# GH 10041
ts = Timestamp('20130501T071545.123456789')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
expected = 1293840000000000010
t = t + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
def test_nat_arithmetic(self):
# GH 6873
i = 2
f = 1.5
for (left, right) in [(pd.NaT, i), (pd.NaT, f), (pd.NaT, np.nan)]:
self.assertIs(left / right, pd.NaT)
self.assertIs(left * right, pd.NaT)
self.assertIs(right * left, pd.NaT)
with tm.assertRaises(TypeError):
right / left
# Timestamp / datetime
t = Timestamp('2014-01-01')
dt = datetime.datetime(2014, 1, 1)
for (left, right) in [(pd.NaT, pd.NaT), (pd.NaT, t), (pd.NaT, dt)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
# timedelta-like
# offsets are tested in test_offsets.py
delta = datetime.timedelta(3600)
td = Timedelta('5s')
for (left, right) in [(pd.NaT, delta), (pd.NaT, td)]:
# NaT + timedelta-like returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(right - left, pd.NaT)
self.assertIs(left - right, pd.NaT)
# GH 11718
tm._skip_if_no_pytz()
import pytz
t_utc = Timestamp('2014-01-01', tz='UTC')
t_tz = Timestamp('2014-01-01', tz='US/Eastern')
dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
for (left, right) in [(pd.NaT, t_utc), (pd.NaT, t_tz),
(pd.NaT, dt_tz)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
# int addition / subtraction
for (left, right) in [(pd.NaT, 2), (pd.NaT, 0), (pd.NaT, -3)]:
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
def test_nat_arithmetic_index(self):
# GH 11718
# datetime
tm._skip_if_no_pytz()
dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
self.assert_index_equal(dti + pd.NaT, exp)
self.assert_index_equal(pd.NaT + dti, exp)
dti_tz = pd.DatetimeIndex(['2011-01-01', '2011-01-02'],
tz='US/Eastern', name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x', tz='US/Eastern')
self.assert_index_equal(dti_tz + pd.NaT, exp)
self.assert_index_equal(pd.NaT + dti_tz, exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT], name='x')
for (left, right) in [(pd.NaT, dti), (pd.NaT, dti_tz)]:
self.assert_index_equal(left - right, exp)
self.assert_index_equal(right - left, exp)
# timedelta
tdi = pd.TimedeltaIndex(['1 day', '2 day'], name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
for (left, right) in [(pd.NaT, tdi)]:
self.assert_index_equal(left + right, exp)
self.assert_index_equal(right + left, exp)
self.assert_index_equal(left - right, exp)
self.assert_index_equal(right - left, exp)
class TestTslib(tm.TestCase):
def test_intraday_conversion_factors(self):
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('H'), False), 24)
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('T'), False), 1440)
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('S'), False), 86400)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('L'), False), 86400000)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('U'), False), 86400000000)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('N'), False), 86400000000000)
self.assertEqual(period_asfreq(
1, get_freq('H'), get_freq('T'), False), 60)
self.assertEqual(period_asfreq(
1, get_freq('H'), get_freq('S'), False), 3600)
self.assertEqual(period_asfreq(1, get_freq('H'),
get_freq('L'), False), 3600000)
self.assertEqual(period_asfreq(1, get_freq(
'H'), get_freq('U'), False), 3600000000)
self.assertEqual(period_asfreq(1, get_freq(
'H'), get_freq('N'), False), 3600000000000)
self.assertEqual(period_asfreq(
1, get_freq('T'), get_freq('S'), False), 60)
self.assertEqual(period_asfreq(
1, get_freq('T'), get_freq('L'), False), 60000)
self.assertEqual(period_asfreq(1, get_freq(
'T'), get_freq('U'), False), 60000000)
self.assertEqual(period_asfreq(1, get_freq(
'T'), get_freq('N'), False), 60000000000)
self.assertEqual(period_asfreq(
1, get_freq('S'), get_freq('L'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('S'),
get_freq('U'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq(
'S'), get_freq('N'), False), 1000000000)
self.assertEqual(period_asfreq(
1, get_freq('L'), get_freq('U'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('L'),
get_freq('N'), False), 1000000)
self.assertEqual(period_asfreq(
1, get_freq('U'), get_freq('N'), False), 1000)
def test_period_ordinal_start_values(self):
# information for 1.1.1970
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('A')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('M')))
self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('D')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('B')))
def test_period_ordinal_week(self):
self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0,
get_freq('W')))
def test_period_ordinal_business_day(self):
# Thursday
self.assertEqual(11415, period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0,
get_freq('B')))
# Friday
self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0,
get_freq('B')))
# Saturday
self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0,
get_freq('B')))
# Sunday
self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0,
get_freq('B')))
# Monday
self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0,
get_freq('B')))
# Tuesday
self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0,
get_freq('B')))
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
# local tz to UTC can differ at hourly (or higher) freqs because
# of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array(
[tslib.iNaT], dtype=np.int64))
def test_shift_months(self):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), Timestamp(
'2000-01-31 00:23:00'), Timestamp('2000-01-01'), Timestamp(
'2000-02-29'), Timestamp('2000-12-31')])
for years in [-1, 0, 1]:
for months in [-2, 0, 2]:
actual = DatetimeIndex(tslib.shift_months(s.asi8, years * 12 +
months))
expected = DatetimeIndex([x + offsets.DateOffset(
years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
def test_round(self):
stamp = Timestamp('2000-01-05 05:09:15.13')
def _check_round(freq, expected):
result = stamp.round(freq=freq)
self.assertEqual(result, expected)
for freq, expected in [('D', Timestamp('2000-01-05 00:00:00')),
('H', Timestamp('2000-01-05 05:00:00')),
('S', Timestamp('2000-01-05 05:09:15'))]:
_check_round(freq, expected)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
stamp.round('foo')
class TestTimestampOps(tm.TestCase):
def test_timestamp_and_datetime(self):
self.assertEqual((Timestamp(datetime.datetime(
2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)
self.assertEqual((datetime.datetime(2013, 10, 12) -
Timestamp(datetime.datetime(2013, 10, 13))).days, -1)
def test_timestamp_and_series(self):
timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D',
tz='US/Eastern'))
first_timestamp = timestamp_series[0]
delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
assert_series_equal(timestamp_series - first_timestamp, delta_series)
assert_series_equal(first_timestamp - timestamp_series, -delta_series)
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
datetime_instance = datetime.datetime(2014, 3, 4)
timedelta_instance = datetime.timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1,
freq='D')[0]
self.assertEqual(type(timestamp_instance + 1), Timestamp)
self.assertEqual(type(timestamp_instance - 1), Timestamp)
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta; more tests in tseries/base/tests/test_base.py
self.assertEqual(
type(timestamp_instance - datetime_instance), Timedelta)
self.assertEqual(
type(timestamp_instance + timedelta_instance), Timestamp)
self.assertEqual(
type(timestamp_instance - timedelta_instance), Timestamp)
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(
type(timestamp_instance + timedelta64_instance), Timestamp)
self.assertEqual(
type(timestamp_instance - timedelta64_instance), Timestamp)
def test_addition_subtraction_preserve_frequency(self):
timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]
timedelta_instance = datetime.timedelta(days=1)
original_freq = timestamp_instance.freq
self.assertEqual((timestamp_instance + 1).freq, original_freq)
self.assertEqual((timestamp_instance - 1).freq, original_freq)
self.assertEqual(
(timestamp_instance + timedelta_instance).freq, original_freq)
self.assertEqual(
(timestamp_instance - timedelta_instance).freq, original_freq)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(
(timestamp_instance + timedelta64_instance).freq, original_freq)
self.assertEqual(
(timestamp_instance - timedelta64_instance).freq, original_freq)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
[D_RESO, D_RESO,
D_RESO, D_RESO,
H_RESO, T_RESO,
S_RESO, MS_RESO,
US_RESO]):
for tz in [None, 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Eastern']:
idx = date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
result = period.resolution(idx.asi8, idx.tz)
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
ProjectsUCSC/NLP
|
Assignment 2/topicModel.py
|
1
|
2416
|
from lda import lda_tm
import numpy as np
import pandas as pd
import btm
import gensim
from gensim import corpora, models
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from time import gmtime, strftime
import numpy as np
#usage
#csv_name = "Homework2_data.csv"
#df = pp.preprocess(csv_name)
def run_btm_tm(csv_name, df, num_topics, top_topics):
print "================BTM===================="
print "running topic modelling using btm"
btm_dir = "OnlineBTM" #directory where btm code is .
btm.run_btm(btm_dir, csv_name, df, num_topics, top_topics)
print "topic modelling using btm complete."
def run_lda_tm(texts, no_topics, top_words):
print "================LDA===================="
print "running topic modelling using LDA"
lda_output = "lda_output.txt"
# texts = ["dog cat banana", "fruit apple bird wolf lion", "orange tomato"]
texts = [[word for word in text.split()] for text in texts]
# print texts
dictionary = corpora.Dictionary(texts)
# for d in dictionary.keys():
# print d, dictionary[d]
corpus = [dictionary.doc2bow(text) for text in texts]
# print corpus
model = gensim.models.ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=no_topics, update_every=1, passes=1)
print "number of topics = ", no_topics
# topics = model.show_topics(num_topics=no_topics, num_words=top_words)
# print "These are our topics\n", topics
f = open(lda_output, "w")
for i in range(no_topics):
words = model.get_topic_terms(i, top_words)
f.write("Topic " + str(i) + ":\n")
for pair in words:
f.write(str(dictionary[pair[0]]) + " " + str(pair[1]) + ",")
f.write("\n")
f.close()
return
#get_document_topics(bow, minimum_probability=None, minimum_phi_value=None, per_word_topics=False)
#Return topic distribution for the given document bow, as a list of (topic_id, topic_probability) 2-tuples.
#Ignore topics with very low probability (below minimum_probability).
#get_term_topics(word_id, minimum_probability=None)
#Returns most likely topics for a particular word in vocab.
#get_topic_terms(topicid, topn=10)
#Return a list of (word_id, probability) 2-tuples for the most probable words in topic topicid.
#Only return 2-tuples for the topn most probable words (ignore the rest).
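# Illustrative sketch only (not called anywhere in this script): how the
# gensim methods documented above could be used to get the topic mixture of
# a new document, given a model and dictionary like the ones built in
# run_lda_tm. The function name and arguments below are hypothetical.
def document_topics_sketch(model, dictionary, text, min_prob=0.01):
    # convert the raw text to the same bag-of-words representation used in training
    bow = dictionary.doc2bow(text.split())
    # list of (topic_id, probability) 2-tuples; topics below min_prob are dropped
    return model.get_document_topics(bow, minimum_probability=min_prob)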
|
mit
|
beiko-lab/gengis
|
bin/Lib/site-packages/mpl_toolkits/basemap/__init__.py
|
1
|
254201
|
"""
Module for plotting data on maps with matplotlib.
Contains the :class:`Basemap` class (which does most of the
heavy lifting), and the following functions:
:func:`interp`: bilinear interpolation between rectilinear grids.
:func:`maskoceans`: mask 'wet' points of an input array.
:func:`shiftgrid`: shifts global lat/lon grids east or west.
:func:`addcyclic`: Add cyclic (wraparound) point in longitude.
"""
from matplotlib import __version__ as _matplotlib_version
from matplotlib.cbook import is_scalar, dedent
# check to make sure matplotlib is not too old.
_mpl_required_version = '0.98'
if _matplotlib_version < _mpl_required_version:
msg = dedent("""
your matplotlib is too old - basemap requires version %s or
higher, you have version %s""" %
(_mpl_required_version,_matplotlib_version))
raise ImportError(msg)
from matplotlib import rcParams, is_interactive
from matplotlib.collections import LineCollection
from matplotlib.patches import Ellipse, Circle, Polygon, FancyArrowPatch
from matplotlib.lines import Line2D
from matplotlib.transforms import Bbox
from mpl_toolkits.basemap import pyproj
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.image import imread
import sys, os, math
from .proj import Proj
import numpy as np
import numpy.ma as ma
import _geoslib
import functools
# basemap data files now installed in lib/matplotlib/toolkits/basemap/data
# check to see if environment variable BASEMAPDATA set to a directory,
# and if so look for the data there.
if 'BASEMAPDATA' in os.environ:
basemap_datadir = os.environ['BASEMAPDATA']
if not os.path.isdir(basemap_datadir):
raise RuntimeError('Path in environment BASEMAPDATA not a directory')
else:
basemap_datadir = os.sep.join([os.path.dirname(__file__), 'data'])
__version__ = '1.0.7'
# module variable that sets the default value for the 'latlon' kwarg.
# can be set to True by user so plotting functions can take lons,lats
# in degrees by default, instead of x,y (map projection coords in meters).
latlon_default = False
# supported map projections.
_projnames = {'cyl' : 'Cylindrical Equidistant',
'merc' : 'Mercator',
'tmerc' : 'Transverse Mercator',
'omerc' : 'Oblique Mercator',
'mill' : 'Miller Cylindrical',
'gall' : 'Gall Stereographic Cylindrical',
'cea' : 'Cylindrical Equal Area',
'lcc' : 'Lambert Conformal',
'laea' : 'Lambert Azimuthal Equal Area',
'nplaea' : 'North-Polar Lambert Azimuthal',
'splaea' : 'South-Polar Lambert Azimuthal',
'eqdc' : 'Equidistant Conic',
'aeqd' : 'Azimuthal Equidistant',
'npaeqd' : 'North-Polar Azimuthal Equidistant',
'spaeqd' : 'South-Polar Azimuthal Equidistant',
'aea' : 'Albers Equal Area',
'stere' : 'Stereographic',
'npstere' : 'North-Polar Stereographic',
'spstere' : 'South-Polar Stereographic',
'cass' : 'Cassini-Soldner',
'poly' : 'Polyconic',
'ortho' : 'Orthographic',
'geos' : 'Geostationary',
'nsper' : 'Near-Sided Perspective',
'sinu' : 'Sinusoidal',
'moll' : 'Mollweide',
'hammer' : 'Hammer',
'robin' : 'Robinson',
'kav7' : 'Kavrayskiy VII',
'eck4' : 'Eckert IV',
'vandg' : 'van der Grinten',
'mbtfpq' : 'McBryde-Thomas Flat-Polar Quartic',
'gnom' : 'Gnomonic',
'rotpole' : 'Rotated Pole',
}
supported_projections = []
for _items in _projnames.items():
supported_projections.append(" %-17s%-40s\n" % (_items))
supported_projections = ''.join(supported_projections)
_cylproj = ['cyl','merc','mill','gall','cea']
_pseudocyl = ['moll','robin','eck4','kav7','sinu','mbtfpq','vandg','hammer']
_dg2rad = math.radians(1.)
_rad2dg = math.degrees(1.)
# projection specific parameters.
projection_params = {'cyl' : 'corners only (no width/height)',
'merc' : 'corners plus lat_ts (no width/height)',
'tmerc' : 'lon_0,lat_0,k_0',
'omerc' : 'lon_0,lat_0,lat_1,lat_2,lon_1,lon_2,no_rot,k_0',
'mill' : 'corners only (no width/height)',
'gall' : 'corners only (no width/height)',
'cea' : 'corners only plus lat_ts (no width/height)',
'lcc' : 'lon_0,lat_0,lat_1,lat_2,k_0',
'laea' : 'lon_0,lat_0',
'nplaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'splaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'eqdc' : 'lon_0,lat_0,lat_1,lat_2',
'aeqd' : 'lon_0,lat_0',
'npaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'aea' : 'lon_0,lat_0,lat_1',
'stere' : 'lon_0,lat_0,lat_ts,k_0',
'npstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'spstere' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
'cass' : 'lon_0,lat_0',
'poly' : 'lon_0,lat_0',
'ortho' : 'lon_0,lat_0,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'geos' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'nsper' : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
'sinu' : 'lon_0,lat_0,no corners or width/height',
'moll' : 'lon_0,lat_0,no corners or width/height',
'hammer' : 'lon_0,lat_0,no corners or width/height',
'robin' : 'lon_0,lat_0,no corners or width/height',
'eck4' : 'lon_0,lat_0,no corners or width/height',
'kav7' : 'lon_0,lat_0,no corners or width/height',
'vandg' : 'lon_0,lat_0,no corners or width/height',
'mbtfpq' : 'lon_0,lat_0,no corners or width/height',
'gnom' : 'lon_0,lat_0',
'rotpole' : 'lon_0,o_lat_p,o_lon_p,corner lat/lon or corner x,y (no width/height)'
}
# create dictionary that maps epsg codes to Basemap kwargs.
epsgf = open(os.path.join(basemap_datadir,'epsg'))
epsg_dict={}
for line in epsgf:
if line.startswith("#"):
continue
l = line.split()
code = l[0].strip("<>")
parms = ' '.join(l[1:-1])
_kw_args={}
for s in l[1:-1]:
try:
k,v = s.split('=')
except ValueError:
continue
k = k.strip("+")
if k=='proj':
if v == 'longlat': v = 'cyl'
if v not in _projnames:
continue
k='projection'
if k=='k':
k='k_0'
if k in ['projection','lat_1','lat_2','lon_0','lat_0',\
'a','b','k_0','lat_ts','ellps','datum']:
if k not in ['projection','ellps','datum']:
v = float(v)
_kw_args[k]=v
if 'projection' in _kw_args:
if 'a' in _kw_args:
if 'b' in _kw_args:
_kw_args['rsphere']=(_kw_args['a'],_kw_args['b'])
del _kw_args['b']
else:
_kw_args['rsphere']=_kw_args['a']
del _kw_args['a']
if 'datum' in _kw_args:
if _kw_args['datum'] == 'NAD83':
_kw_args['ellps'] = 'GRS80'
elif _kw_args['datum'] == 'NAD27':
_kw_args['ellps'] = 'clrk66'
elif _kw_args['datum'] == 'WGS84':
_kw_args['ellps'] = 'WGS84'
del _kw_args['datum']
# supported epsg projections.
# omerc not supported yet, since we can't handle
# alpha,gamma and lonc keywords.
if _kw_args['projection'] != 'omerc':
epsg_dict[code]=_kw_args
epsgf.close()
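# Illustrative sketch (not part of basemap): given an EPSG code that appears
# in the data file parsed above, return the Basemap constructor keywords it
# maps to. The default code below is only an example; availability depends
# on the bundled 'epsg' data file.
def _epsg_kwargs_sketch(code='4326'):
    # keys of epsg_dict are the string codes read from the file; the values
    # are the keyword/value pairs merged into Basemap.__init__ when epsg= is
    # given (see the epsg handling at the top of __init__ below)
    return dict(epsg_dict.get(str(code), {}))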
# The __init__ docstring is pulled out here because it is so long;
# Having it in the usual place makes it hard to get from the
# __init__ argument list to the code that uses the arguments.
_Basemap_init_doc = """
Sets up a basemap with specified map projection,
and creates the coastline data structures in map projection
coordinates.
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection coordinates
(in meters). The inverse transformation is done if the optional keyword
``inverse`` is set to True.
The desired projection is set with the projection keyword. Default is ``cyl``.
Supported values for the projection keyword are:
============== ====================================================
Value Description
============== ====================================================
%(supported_projections)s
============== ====================================================
For most map projections, the map projection region can either be
specified by setting these keywords:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
llcrnrlon longitude of lower left hand corner of the desired map
domain (degrees).
llcrnrlat latitude of lower left hand corner of the desired map
domain (degrees).
urcrnrlon longitude of upper right hand corner of the desired map
domain (degrees).
urcrnrlat latitude of upper right hand corner of the desired map
domain (degrees).
============== ====================================================
or these
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
width width of desired map domain in projection coordinates
(meters).
height height of desired map domain in projection coordinates
(meters).
lon_0 center of desired map domain (in degrees).
lat_0 center of desired map domain (in degrees).
============== ====================================================
For ``sinu``, ``moll``, ``hammer``, ``npstere``, ``spstere``, ``nplaea``, ``splaea``,
``npaeqd``, ``spaeqd``, ``robin``, ``eck4``, ``kav7``, or ``mbtfpq``, the values of
llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat, width and height are ignored
(because either they are computed internally, or entire globe is
always plotted).
For the cylindrical projections (``cyl``, ``merc``, ``mill``, ``cea`` and ``gall``),
the default is to use
llcrnrlon=-180,llcrnrlat=-90, urcrnrlon=180 and urcrnrlat=90. For all other
projections except ``ortho``, ``geos`` and ``nsper``, either the lat/lon values of the
corners or width and height must be specified by the user.
For ``ortho``, ``geos`` and ``nsper``, the lat/lon values of the corners may be specified,
or the x/y values of the corners (llcrnrx,llcrnry,urcrnrx,urcrnry) in the
coordinate system of the global projection (with x=0,y=0 at the center
of the global projection). If the corners are not specified,
the entire globe is plotted.
For ``rotpole``, the lat/lon values of the corners on the unrotated sphere
may be provided as llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat, or the lat/lon
values of the corners on the rotated sphere can be given as
llcrnrx,llcrnry,urcrnrx,urcrnry.
Other keyword arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
resolution resolution of boundary database to use. Can be ``c``
(crude), ``l`` (low), ``i`` (intermediate), ``h``
(high), ``f`` (full) or None.
If None, no boundary data will be read in (and
class methods such as drawcoastlines will raise an
exception if invoked).
Resolution drops off by roughly 80%% between datasets.
Higher res datasets are much slower to draw.
Default ``c``. Coastline data is from the GSHHS
(http://www.soest.hawaii.edu/wessel/gshhs/gshhs.html).
State, country and river datasets from the Generic
Mapping Tools (http://gmt.soest.hawaii.edu).
area_thresh coastline or lake with an area smaller than
area_thresh in km^2 will not be plotted.
Default 10000,1000,100,10,1 for resolution
``c``, ``l``, ``i``, ``h``, ``f``.
rsphere radius of the sphere used to define map projection
(default 6370997 meters, close to the arithmetic mean
radius of the earth). If given as a sequence, the
first two elements are interpreted as the radii
of the major and minor axes of an ellipsoid.
Note: sometimes an ellipsoid is specified by the
major axis and an inverse flattening parameter (if).
The minor axis (b) can be computed from the major
axis (a) and the inverse flattening parameter using
the formula if = a/(a-b).
ellps string describing ellipsoid ('GRS80' or 'WGS84',
for example). If both rsphere and ellps are given,
rsphere is ignored. Default None. See pyproj.pj_ellps
for allowed values.
suppress_ticks suppress automatic drawing of axis ticks and labels
in map projection coordinates. Default False,
so parallels and meridians can be labelled instead.
If parallel or meridian labelling is requested
(using drawparallels and drawmeridians methods),
automatic tick labelling will be suppressed even if
suppress_ticks=False. suppress_ticks=False
is useful if you want to use your own custom tick
formatter, or if you want to let matplotlib label
the axes in meters using map projection
coordinates.
fix_aspect fix aspect ratio of plot to match aspect ratio
of map projection region (default True).
anchor determines how map is placed in axes rectangle
(passed to axes.set_aspect). Default is ``C``,
which means map is centered.
Allowed values are
``C``, ``SW``, ``S``, ``SE``, ``E``, ``NE``,
``N``, ``NW``, and ``W``.
celestial use astronomical conventions for longitude (i.e.
negative longitudes to the east of 0). Default False.
Implies resolution=None.
ax set default axes instance
(default None - matplotlib.pyplot.gca() may be used
to get the current axes instance).
If you do not want matplotlib.pyplot to be imported,
you can either set this to a pre-defined axes
instance, or use the ``ax`` keyword in each Basemap
method call that does drawing. In the first case,
all Basemap method calls will draw to the same axes
instance. In the second case, you can draw to
different axes with the same Basemap instance.
You can also use the ``ax`` keyword in individual
method calls to selectively override the default
axes instance.
============== ====================================================
The following keywords are map projection parameters which all default to
None. Not all parameters are used by all projections, some are ignored.
The module variable ``projection_params`` is a dictionary which
lists which parameters apply to which projections.
.. tabularcolumns:: |l|L|
================ ====================================================
Keyword Description
================ ====================================================
lat_ts latitude of true scale. Optional for stereographic,
cylindrical equal area and mercator projections.
default is lat_0 for stereographic projection.
default is 0 for mercator and cylindrical equal area
projections.
lat_1 first standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_1 is not given, but
lat_0 is, lat_1 is set to lat_0 for lambert
conformal, albers equal area and equidistant conic.
lat_2 second standard parallel for lambert conformal,
albers equal area and equidistant conic.
Latitude of one of the two points on the projection
centerline for oblique mercator. If lat_2 is not
given it is set to lat_1 for lambert conformal,
albers equal area and equidistant conic.
lon_1 Longitude of one of the two points on the projection
centerline for oblique mercator.
lon_2 Longitude of one of the two points on the projection
centerline for oblique mercator.
k_0 Scale factor at natural origin (used
by 'tmerc', 'omerc', 'stere' and 'lcc').
no_rot only used by oblique mercator.
If set to True, the map projection coordinates will
not be rotated to true North. Default is False
(projection coordinates are automatically rotated).
lat_0 central latitude (y-axis origin) - used by all
projections.
lon_0 central meridian (x-axis origin) - used by all
projections.
boundinglat bounding latitude for pole-centered projections
(npstere,spstere,nplaea,splaea,npaeqd,spaeqd).
These projections are square regions centered
on the north or south pole.
The longitude lon_0 is at 6-o'clock, and the
latitude circle boundinglat is tangent to the edge
of the map at lon_0.
round cut off pole-centered projection at boundinglat
(so plot is a circle instead of a square). Only
relevant for npstere,spstere,nplaea,splaea,npaeqd
or spaeqd projections. Default False.
satellite_height height of satellite (in m) above equator -
only relevant for geostationary
and near-sided perspective (``geos`` or ``nsper``)
projections. Default 35,786 km.
================ ====================================================
Useful instance variables:
.. tabularcolumns:: |l|L|
================ ====================================================
Variable Name Description
================ ====================================================
projection map projection. Print the module variable
``supported_projections`` to see a list of allowed
values.
epsg EPSG code defining projection (see
http://spatialreference.org for a list of
EPSG codes and their definitions).
aspect map aspect ratio
(size of y dimension / size of x dimension).
llcrnrlon longitude of lower left hand corner of the
selected map domain.
llcrnrlat latitude of lower left hand corner of the
selected map domain.
urcrnrlon longitude of upper right hand corner of the
selected map domain.
urcrnrlat latitude of upper right hand corner of the
selected map domain.
llcrnrx x value of lower left hand corner of the
selected map domain in map projection coordinates.
llcrnry y value of lower left hand corner of the
selected map domain in map projection coordinates.
urcrnrx x value of upper right hand corner of the
selected map domain in map projection coordinates.
urcrnry y value of upper right hand corner of the
selected map domain in map projection coordinates.
rmajor equatorial radius of ellipsoid used (in meters).
rminor polar radius of ellipsoid used (in meters).
resolution resolution of boundary dataset being used (``c``
for crude, ``l`` for low, etc.).
If None, no boundary dataset is associated with the
Basemap instance.
proj4string the string describing the map projection that is
used by PROJ.4.
================ ====================================================
**Converting from Geographic (lon/lat) to Map Projection (x/y) Coordinates**
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``, ``mill``,
``cea``, ``gall`` or ``merc``)
the inverse transformation will return longitudes between
self.llcrnrlon and self.urcrnrlon (an illustrative pyproj
round-trip sketch follows this docstring).
Input arguments lon, lat can be either scalar floats, sequences
or numpy arrays.
**Example Usage:**
>>> from mpl_toolkits.basemap import Basemap
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> # read in topo data (on a regular lat/lon grid)
>>> etopo = np.loadtxt('etopo20data.gz')
>>> lons = np.loadtxt('etopo20lons.gz')
>>> lats = np.loadtxt('etopo20lats.gz')
>>> # create Basemap instance for Robinson projection.
>>> m = Basemap(projection='robin',lon_0=0.5*(lons[0]+lons[-1]))
>>> # compute map projection coordinates for lat/lon grid.
>>> x, y = m(*np.meshgrid(lons,lats))
>>> # make filled contour plot.
>>> cs = m.contourf(x,y,etopo,30,cmap=plt.cm.jet)
>>> m.drawcoastlines() # draw coastlines
>>> m.drawmapboundary() # draw a line around the map region
>>> m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels
>>> m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1]) # draw meridians
>>> plt.title('Robinson Projection') # add a title
>>> plt.show()
[this example (simpletest.py) plus many others can be found in the
examples directory of source distribution. The "OO" version of this
example (which does not use matplotlib.pyplot) is called "simpletest_oo.py".]
""" % locals()
# unsupported projection error message.
_unsupported_projection = ["'%s' is an unsupported projection.\n"]
_unsupported_projection.append("The supported projections are:\n")
_unsupported_projection.append(supported_projections)
_unsupported_projection = ''.join(_unsupported_projection)
def _validated_ll(param, name, minval, maxval):
param = float(param)
if param > maxval or param < minval:
raise ValueError('%s must be between %f and %f degrees' %
(name, minval, maxval))
return param
def _insert_validated(d, param, name, minval, maxval):
if param is not None:
d[name] = _validated_ll(param, name, minval, maxval)
def _transform(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,data,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x, data = self.shiftdata(x, data)
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,data,*args,**kwargs)
return with_transform
def _transform1d(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates.
@functools.wraps(plotfunc)
def with_transform(self,x,y,*args,**kwargs):
x = np.asarray(x)
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
if x.ndim == 1:
x = self.shiftdata(x)
elif x.ndim == 0:
if x > 180:
x = x - 360.
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,*args,**kwargs)
return with_transform
def _transformuv(plotfunc):
# shift data and longitudes to map projection region, then compute
# transformation to map projection coordinates. Works when call
# signature has two data arrays instead of one.
@functools.wraps(plotfunc)
def with_transform(self,x,y,u,v,*args,**kwargs):
# input coordinates are latitude/longitude, not map projection coords.
if kwargs.pop('latlon', latlon_default):
# shift data to map projection region for
# cylindrical and pseudo-cylindrical projections.
if self.projection in _cylproj or self.projection in _pseudocyl:
x1, u = self.shiftdata(x, u)
x, v = self.shiftdata(x, v)
# convert lat/lon coords to map projection coords.
x, y = self(x,y)
return plotfunc(self,x,y,u,v,*args,**kwargs)
return with_transform
class Basemap(object):
def __init__(self, llcrnrlon=None, llcrnrlat=None,
urcrnrlon=None, urcrnrlat=None,
llcrnrx=None, llcrnry=None,
urcrnrx=None, urcrnry=None,
width=None, height=None,
projection='cyl', resolution='c',
area_thresh=None, rsphere=6370997.0,
ellps=None, lat_ts=None,
lat_1=None, lat_2=None,
lat_0=None, lon_0=None,
lon_1=None, lon_2=None,
o_lon_p=None, o_lat_p=None,
k_0=None,
no_rot=False,
suppress_ticks=True,
satellite_height=35786000,
boundinglat=None,
fix_aspect=True,
anchor='C',
celestial=False,
round=False,
epsg=None,
ax=None):
# docstring is added after __init__ method definition
# set epsg code if given, set to 4326 for projection='cyl':
if epsg is not None:
self.epsg = epsg
elif projection == 'cyl':
self.epsg = 4326
# replace kwarg values with those implied by epsg code,
# if given.
if hasattr(self,'epsg'):
if str(self.epsg) not in epsg_dict:
raise ValueError('%s is not a supported EPSG code' %
self.epsg)
epsg_params = epsg_dict[str(self.epsg)]
for k in epsg_params:
if k == 'projection':
projection = epsg_params[k]
elif k == 'rsphere':
rsphere = epsg_params[k]
elif k == 'ellps':
ellps = epsg_params[k]
elif k == 'lat_1':
lat_1 = epsg_params[k]
elif k == 'lat_2':
lat_2 = epsg_params[k]
elif k == 'lon_0':
lon_0 = epsg_params[k]
elif k == 'lat_0':
lat_0 = epsg_params[k]
elif k == 'lat_ts':
lat_ts = epsg_params[k]
elif k == 'k_0':
k_0 = epsg_params[k]
# fix aspect ratio of plot to match aspect ratio of map projection
# region
self.fix_aspect = fix_aspect
# where to put plot in figure (default is 'C' or center)
self.anchor = anchor
# geographic or celestial coords?
self.celestial = celestial
# map projection.
self.projection = projection
# bounding lat (for pole-centered plots)
self.boundinglat = boundinglat
# is a round pole-centered plot desired?
self.round = round
# full disk projection?
self._fulldisk = False # default value
# set up projection parameter dict.
projparams = {}
projparams['proj'] = projection
# if ellps keyword specified, it over-rides rsphere.
if ellps is not None:
try:
elldict = pyproj.pj_ellps[ellps]
except KeyError:
raise ValueError(
'illegal ellps definition, allowed values are %s' %
pyproj.pj_ellps.keys())
projparams['a'] = elldict['a']
if 'b' in elldict:
projparams['b'] = elldict['b']
else:
projparams['b'] = projparams['a']*(1.0-(1.0/elldict['rf']))
else:
try:
if rsphere[0] > rsphere[1]:
projparams['a'] = rsphere[0]
projparams['b'] = rsphere[1]
else:
projparams['a'] = rsphere[1]
projparams['b'] = rsphere[0]
except:
if projection == 'tmerc':
# use bR_a instead of R because of obscure bug
# in proj4 for tmerc projection.
projparams['bR_a'] = rsphere
else:
projparams['R'] = rsphere
# set units to meters.
projparams['units']='m'
# check for sane values of lon_0, lat_0, lat_ts, lat_1, lat_2
_insert_validated(projparams, lat_0, 'lat_0', -90, 90)
_insert_validated(projparams, lat_1, 'lat_1', -90, 90)
_insert_validated(projparams, lat_2, 'lat_2', -90, 90)
_insert_validated(projparams, lat_ts, 'lat_ts', -90, 90)
_insert_validated(projparams, lon_0, 'lon_0', -360, 720)
_insert_validated(projparams, lon_1, 'lon_1', -360, 720)
_insert_validated(projparams, lon_2, 'lon_2', -360, 720)
if projection in ['geos','nsper']:
projparams['h'] = satellite_height
# check for sane values of projection corners.
using_corners = (None not in [llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat])
if using_corners:
self.llcrnrlon = _validated_ll(llcrnrlon, 'llcrnrlon', -360, 720)
self.urcrnrlon = _validated_ll(urcrnrlon, 'urcrnrlon', -360, 720)
self.llcrnrlat = _validated_ll(llcrnrlat, 'llcrnrlat', -90, 90)
self.urcrnrlat = _validated_ll(urcrnrlat, 'urcrnrlat', -90, 90)
# for each of the supported projections,
# compute lat/lon of domain corners
# and set values in projparams dict as needed.
if projection in ['lcc', 'eqdc', 'aea']:
if projection == 'lcc' and k_0 is not None:
projparams['k_0']=k_0
# if lat_0 is given, but not lat_1,
# set lat_1=lat_0
if lat_1 is None and lat_0 is not None:
lat_1 = lat_0
projparams['lat_1'] = lat_1
if lat_1 is None or lon_0 is None:
raise ValueError('must specify lat_1 or lat_0 and lon_0 for %s basemap (lat_2 is optional)' % _projnames[projection])
if lat_2 is None:
projparams['lat_2'] = lat_1
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'stere':
if k_0 is not None:
projparams['k_0']=k_0
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Stereographic basemap (lat_ts is optional)')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['spstere', 'npstere',
'splaea', 'nplaea',
'spaeqd', 'npaeqd']:
if (projection == 'splaea' and boundinglat >= 0) or\
(projection == 'nplaea' and boundinglat <= 0):
msg='boundinglat cannot extend into opposite hemisphere'
raise ValueError(msg)
if boundinglat is None or lon_0 is None:
raise ValueError('must specify boundinglat and lon_0 for %s basemap' % _projnames[projection])
if projection[0] == 's':
sgn = -1
else:
sgn = 1
rootproj = projection[2:]
projparams['proj'] = rootproj
if rootproj == 'stere':
projparams['lat_ts'] = sgn * 90.
projparams['lat_0'] = sgn * 90.
self.llcrnrlon = lon_0 - sgn*45.
self.urcrnrlon = lon_0 + sgn*135.
proj = pyproj.Proj(projparams)
x,y = proj(lon_0,boundinglat)
lon,self.llcrnrlat = proj(math.sqrt(2.)*y,0.,inverse=True)
self.urcrnrlat = self.llcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[projection])
elif projection == 'laea':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Lambert Azimuthal basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in ['tmerc','gnom','cass','poly'] :
if projection == 'tmerc' and k_0 is not None:
projparams['k_0']=k_0
if projection == 'gnom' and 'R' not in projparams:
raise ValueError('gnomonic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Transverse Mercator, Gnomonic, Cassini-Soldner and Polyconic basemap')
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'ortho':
if 'R' not in projparams:
raise ValueError('orthographic projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Orthographic basemap')
if (lat_0 == 90 or lat_0 == -90) and\
None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
# for ortho plot centered on pole, set boundinglat to equator.
# (so meridian labels can be drawn in this special case).
self.boundinglat = 0
self.round = True
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
# FIXME: won't work for points exactly on equator??
if np.abs(lat_0) < 1.e-2: lat_0 = 1.e-2
projparams['lat_0'] = lat_0
elif projection == 'geos':
if lat_0 is not None and lat_0 != 0:
raise ValueError('lat_0 must be zero for Geostationary basemap')
if lon_0 is None:
raise ValueError('must specify lon_0 for Geostationary basemap')
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'nsper':
if 'R' not in projparams:
raise ValueError('near-sided perspective projection only works for perfect spheres - not ellipsoids')
if lat_0 is None or lon_0 is None:
msg='must specify lon_0 and lat_0 for near-sided perspective Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if not using_corners:
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
self._fulldisk = True
else:
self._fulldisk = False
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _pseudocyl:
if lon_0 is None:
raise ValueError('must specify lon_0 for %s projection' % _projnames[self.projection])
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
llcrnrlon = lon_0-180.
llcrnrlat = -90.
urcrnrlon = lon_0+180
urcrnrlat = 90.
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'omerc':
if k_0 is not None:
projparams['k_0']=k_0
if lat_1 is None or lon_1 is None or lat_2 is None or lon_2 is None:
raise ValueError('must specify lat_1,lon_1 and lat_2,lon_2 for Oblique Mercator basemap')
projparams['lat_1'] = lat_1
projparams['lon_1'] = lon_1
projparams['lat_2'] = lat_2
projparams['lon_2'] = lon_2
projparams['lat_0'] = lat_0
if no_rot:
projparams['no_rot']=''
#if not using_corners:
# raise ValueError, 'cannot specify map region with width and height keywords for this projection, please specify lat/lon values of corners'
if not using_corners:
if width is None or height is None:
raise ValueError('must either specify lat/lon values of corners (llcrnrlon,llcrnrlat,ucrnrlon,urcrnrlat) in degrees or width and height in meters')
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection == 'aeqd':
if lat_0 is None or lon_0 is None:
raise ValueError('must specify lat_0 and lon_0 for Azimuthal Equidistant basemap')
if not using_corners:
if width is None or height is None:
self._fulldisk = True
llcrnrlon = -180.
llcrnrlat = -90.
urcrnrlon = 180
urcrnrlat = 90.
else:
self._fulldisk = False
if lon_0 is None or lat_0 is None:
raise ValueError('must specify lon_0 and lat_0 when using width, height to specify projection region')
if not self._fulldisk:
llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat = _choosecorners(width,height,**projparams)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
elif projection in _cylproj:
if projection == 'merc' or projection == 'cea':
if lat_ts is None:
lat_ts = 0.
projparams['lat_ts'] = lat_ts
if not using_corners:
llcrnrlat = -90.
urcrnrlat = 90.
if lon_0 is not None:
llcrnrlon = lon_0-180.
urcrnrlon = lon_0+180.
else:
llcrnrlon = -180.
urcrnrlon = 180
if projection == 'merc':
                    # clip plot region to be within 89.99S to 89.99N
# (mercator is singular at poles)
if llcrnrlat < -89.99: llcrnrlat = -89.99
if llcrnrlat > 89.99: llcrnrlat = 89.99
if urcrnrlat < -89.99: urcrnrlat = -89.99
if urcrnrlat > 89.99: urcrnrlat = 89.99
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
if lon_0 is not None:
projparams['lon_0'] = lon_0
else:
projparams['lon_0']=0.5*(llcrnrlon+urcrnrlon)
elif projection == 'rotpole':
if lon_0 is None or o_lon_p is None or o_lat_p is None:
msg='must specify lon_0,o_lat_p,o_lon_p for rotated pole Basemap'
raise ValueError(msg)
if width is not None or height is not None:
sys.stdout.write('warning: width and height keywords ignored for %s projection' % _projnames[self.projection])
projparams['lon_0']=lon_0
projparams['o_lon_p']=o_lon_p
projparams['o_lat_p']=o_lat_p
projparams['o_proj']='longlat'
projparams['proj']='ob_tran'
if not using_corners and None in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
raise ValueError('must specify lat/lon values of corners in degrees')
if None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
p = pyproj.Proj(projparams)
llcrnrx = _dg2rad*llcrnrx; llcrnry = _dg2rad*llcrnry
urcrnrx = _dg2rad*urcrnrx; urcrnry = _dg2rad*urcrnry
llcrnrlon, llcrnrlat = p(llcrnrx,llcrnry,inverse=True)
urcrnrlon, urcrnrlat = p(urcrnrx,urcrnry,inverse=True)
self.llcrnrlon = llcrnrlon; self.llcrnrlat = llcrnrlat
self.urcrnrlon = urcrnrlon; self.urcrnrlat = urcrnrlat
else:
raise ValueError(_unsupported_projection % projection)
# initialize proj4
proj = Proj(projparams,self.llcrnrlon,self.llcrnrlat,self.urcrnrlon,self.urcrnrlat)
# make sure axis ticks are suppressed.
self.noticks = suppress_ticks
# map boundary not yet drawn.
self._mapboundarydrawn = False
# make Proj instance a Basemap instance variable.
self.projtran = proj
# copy some Proj attributes.
atts = ['rmajor','rminor','esq','flattening','ellipsoid','projparams']
for att in atts:
self.__dict__[att] = proj.__dict__[att]
        # these only exist for the geostationary, orthographic and
        # near-sided perspective projections.
if hasattr(proj,'_width'):
self.__dict__['_width'] = proj.__dict__['_width']
if hasattr(proj,'_height'):
self.__dict__['_height'] = proj.__dict__['_height']
# spatial reference string (useful for georeferencing output
# images with gdal_translate).
if hasattr(self,'_proj4'):
#self.srs = proj._proj4.srs
self.srs = proj._proj4.pjinitstring
else:
pjargs = []
for key,value in self.projparams.items():
# 'cyl' projection translates to 'eqc' in PROJ.4
if projection == 'cyl' and key == 'proj':
value = 'eqc'
# ignore x_0 and y_0 settings for 'cyl' projection
# (they are not consistent with what PROJ.4 uses)
elif projection == 'cyl' and key in ['x_0','y_0']:
continue
pjargs.append('+'+key+"="+str(value)+' ')
self.srs = ''.join(pjargs)
self.proj4string = self.srs
# set instance variables defining map region.
self.xmin = proj.xmin
self.xmax = proj.xmax
self.ymin = proj.ymin
self.ymax = proj.ymax
if projection == 'cyl':
self.aspect = (self.urcrnrlat-self.llcrnrlat)/(self.urcrnrlon-self.llcrnrlon)
else:
self.aspect = (proj.ymax-proj.ymin)/(proj.xmax-proj.xmin)
if projection in ['geos','ortho','nsper'] and \
None not in [llcrnrx,llcrnry,urcrnrx,urcrnry]:
self.llcrnrx = llcrnrx+0.5*proj.xmax
self.llcrnry = llcrnry+0.5*proj.ymax
self.urcrnrx = urcrnrx+0.5*proj.xmax
self.urcrnry = urcrnry+0.5*proj.ymax
self._fulldisk = False
else:
self.llcrnrx = proj.llcrnrx
self.llcrnry = proj.llcrnry
self.urcrnrx = proj.urcrnrx
self.urcrnry = proj.urcrnry
if self.projection == 'rotpole':
lon0,lat0 = self(0.5*(self.llcrnrx + self.urcrnrx),\
0.5*(self.llcrnry + self.urcrnry),\
inverse=True)
self.projparams['lat_0']=lat0
# if ax == None, pyplot.gca may be used.
self.ax = ax
self.lsmask = None
        # This will record hashes of Axes instances.
self._initialized_axes = set()
# set defaults for area_thresh.
self.resolution = resolution
# celestial=True implies resolution=None (no coastlines).
if self.celestial:
self.resolution=None
if area_thresh is None and self.resolution is not None:
if resolution == 'c':
area_thresh = 10000.
elif resolution == 'l':
area_thresh = 1000.
elif resolution == 'i':
area_thresh = 100.
elif resolution == 'h':
area_thresh = 10.
elif resolution == 'f':
area_thresh = 1.
else:
raise ValueError("boundary resolution must be one of 'c','l','i','h' or 'f'")
self.area_thresh = area_thresh
# define map boundary polygon (in lat/lon coordinates)
blons, blats, self._boundarypolyll, self._boundarypolyxy = self._getmapboundary()
self.boundarylats = blats
self.boundarylons = blons
# set min/max lats for projection domain.
if self.projection in _cylproj:
self.latmin = self.llcrnrlat
self.latmax = self.urcrnrlat
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
elif self.projection in ['ortho','geos','nsper'] + _pseudocyl:
self.latmin = -90.
self.latmax = 90.
self.lonmin = self.llcrnrlon
self.lonmax = self.urcrnrlon
else:
lons, lats = self.makegrid(1001,1001)
lats = ma.masked_where(lats > 1.e20,lats)
lons = ma.masked_where(lons > 1.e20,lons)
self.latmin = lats.min()
self.latmax = lats.max()
self.lonmin = lons.min()
self.lonmax = lons.max()
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
if lat_0 is None:
lon_0, lat_0 =\
self(0.5*(self.xmin+self.xmax),
0.5*(self.ymin+self.ymax),inverse=True)
Dateline = _geoslib.Point(self(180.,lat_0))
Greenwich = _geoslib.Point(self(0.,lat_0))
hasNP = NPole.within(self._boundarypolyxy)
hasSP = SPole.within(self._boundarypolyxy)
hasPole = hasNP or hasSP
hasDateline = Dateline.within(self._boundarypolyxy)
hasGreenwich = Greenwich.within(self._boundarypolyxy)
# projection crosses dateline (and not Greenwich or pole).
if not hasPole and hasDateline and not hasGreenwich:
if self.lonmin < 0 and self.lonmax > 0.:
lons = np.where(lons < 0, lons+360, lons)
self.lonmin = lons.min()
self.lonmax = lons.max()
# read in coastline polygons, only keeping those that
# intersect map boundary polygon.
if self.resolution is not None:
self.coastsegs, self.coastpolygontypes =\
self._readboundarydata('gshhs',as_polygons=True)
# reformat for use in matplotlib.patches.Polygon.
self.coastpolygons = []
for seg in self.coastsegs:
x, y = list(zip(*seg))
self.coastpolygons.append((x,y))
# replace coastsegs with line segments (instead of polygons)
self.coastsegs, types =\
self._readboundarydata('gshhs',as_polygons=False)
# create geos Polygon structures for land areas.
# currently only used in is_land method.
self.landpolygons=[]
self.lakepolygons=[]
if self.resolution is not None and len(self.coastpolygons) > 0:
#self.islandinlakepolygons=[]
#self.lakeinislandinlakepolygons=[]
x, y = list(zip(*self.coastpolygons))
for x,y,typ in zip(x,y,self.coastpolygontypes):
b = np.asarray([x,y]).T
if typ == 1: self.landpolygons.append(_geoslib.Polygon(b))
if typ == 2: self.lakepolygons.append(_geoslib.Polygon(b))
#if typ == 3: self.islandinlakepolygons.append(_geoslib.Polygon(b))
#if typ == 4: self.lakeinislandinlakepolygons.append(_geoslib.Polygon(b))
# set __init__'s docstring
__init__.__doc__ = _Basemap_init_doc
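    # Editor's usage sketch (not part of the library source): the keyword
    # combinations accepted by __init__ depend on the projection, as the
    # branches above show.  Assuming the conventional
    # ``from mpl_toolkits.basemap import Basemap`` import:
    #
    #     # polar stereographic: needs boundinglat and lon_0
    #     m = Basemap(projection='npstere', boundinglat=30., lon_0=0.,
    #                 resolution='l')
    #     # orthographic full disk: needs lat_0 and lon_0 (spherical only)
    #     m = Basemap(projection='ortho', lat_0=45., lon_0=-100.,
    #                 resolution='c')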
def __call__(self,x,y,inverse=False):
"""
Calling a Basemap class instance with the arguments lon, lat will
convert lon/lat (in degrees) to x/y map projection
coordinates (in meters). If optional keyword ``inverse`` is
True (default is False), the inverse transformation from x/y
to lon/lat is performed.
For cylindrical equidistant projection (``cyl``), this
does nothing (i.e. x,y == lon,lat).
For non-cylindrical projections, the inverse transformation
always returns longitudes between -180 and 180 degrees. For
cylindrical projections (self.projection == ``cyl``,
``cea``, ``mill``, ``gall`` or ``merc``)
        the inverse transformation will return longitudes between
        self.llcrnrlon and self.urcrnrlon.
Input arguments lon, lat can be either scalar floats,
sequences, or numpy arrays.
"""
if self.celestial:
# don't assume center of map is at greenwich
# (only relevant for cyl or pseudo-cyl projections)
if self.projection in _pseudocyl or self.projection in _cylproj:
lon_0=self.projparams['lon_0']
else:
lon_0 = 0.
if self.celestial and not inverse:
try:
x = 2.*lon_0-x
except TypeError:
x = [2*lon_0-xx for xx in x]
if self.projection == 'rotpole' and inverse:
try:
x = _dg2rad*x
except TypeError:
x = [_dg2rad*xx for xx in x]
try:
y = _dg2rad*y
except TypeError:
y = [_dg2rad*yy for yy in y]
xout,yout = self.projtran(x,y,inverse=inverse)
if self.celestial and inverse:
try:
xout = -2.*lon_0-xout
except:
xout = [-2.*lon_0-xx for xx in xout]
if self.projection == 'rotpole' and not inverse:
try:
xout = _rad2dg*xout
xout = np.where(xout < 0., xout+360, xout)
except TypeError:
xout = [_rad2dg*xx for xx in xout]
xout = [xx+360. if xx < 0 else xx for xx in xout]
try:
yout = _rad2dg*yout
except TypeError:
yout = [_rad2dg*yy for yy in yout]
return xout,yout
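    # Editor's usage sketch for __call__ (illustrative only), assuming an
    # existing Basemap instance ``m``:
    #
    #     x, y = m(-105.0, 40.0)            # lon,lat (degrees) -> x,y (meters)
    #     lon, lat = m(x, y, inverse=True)  # and back to lon,lat
    #
    # Sequences and numpy arrays are transformed elementwise in the same way.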
def makegrid(self,nx,ny,returnxy=False):
"""
return arrays of shape (ny,nx) containing lon,lat coordinates of
an equally spaced native projection grid.
If ``returnxy = True``, the x,y values of the grid are returned also.
"""
return self.projtran.makegrid(nx,ny,returnxy=returnxy)
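    # Editor's usage sketch for makegrid (illustrative only):
    #
    #     lons, lats = m.makegrid(100, 100)
    #     lons, lats, x, y = m.makegrid(100, 100, returnxy=True)
    #
    # The grid is equally spaced in projection (x,y) space, not in lat/lon.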
def _readboundarydata(self,name,as_polygons=False):
"""
read boundary data, clip to map projection region.
"""
msg = dedent("""
Unable to open boundary dataset file. Only the 'crude', 'low',
'intermediate' and 'high' resolution datasets are installed by default.
If you are requesting a 'full' resolution dataset, you may need to
download and install those files separately
(see the basemap README for details).""")
# only gshhs coastlines can be polygons.
if name != 'gshhs': as_polygons=False
try:
bdatfile = open(os.path.join(basemap_datadir,name+'_'+self.resolution+'.dat'),'rb')
bdatmetafile = open(os.path.join(basemap_datadir,name+'meta_'+self.resolution+'.dat'),'r')
except:
raise IOError(msg)
polygons = []
polygon_types = []
# coastlines are polygons, other boundaries are line segments.
if name == 'gshhs':
Shape = _geoslib.Polygon
else:
Shape = _geoslib.LineString
# see if map projection region polygon contains a pole.
NPole = _geoslib.Point(self(0.,90.))
SPole = _geoslib.Point(self(0.,-90.))
boundarypolyxy = self._boundarypolyxy
boundarypolyll = self._boundarypolyll
hasNP = NPole.within(boundarypolyxy)
hasSP = SPole.within(boundarypolyxy)
containsPole = hasNP or hasSP
# these projections cannot cross pole.
if containsPole and\
self.projection in _cylproj + _pseudocyl + ['geos']:
raise ValueError('%s projection cannot cross pole'%(self.projection))
        # make sure some projections have containsPole=True;
# we will compute the intersections in stereographic
# coordinates, then transform back. This is
# because these projections are only defined on a hemisphere, and
# some boundary features (like Eurasia) would be undefined otherwise.
tostere =\
['omerc','ortho','gnom','nsper','nplaea','npaeqd','splaea','spaeqd']
if self.projection in tostere and name == 'gshhs':
containsPole = True
lon_0=self.projparams['lon_0']
lat_0=self.projparams['lat_0']
re = self.projparams['R']
# center of stereographic projection restricted to be
# nearest one of 6 points on the sphere (every 90 deg lat/lon).
lon0 = 90.*(np.around(lon_0/90.))
lat0 = 90.*(np.around(lat_0/90.))
if np.abs(int(lat0)) == 90: lon0=0.
maptran = pyproj.Proj(proj='stere',lon_0=lon0,lat_0=lat0,R=re)
# boundary polygon for ortho/gnom/nsper projection
# in stereographic coordinates.
b = self._boundarypolyll.boundary
blons = b[:,0]; blats = b[:,1]
b[:,0], b[:,1] = maptran(blons, blats)
boundarypolyxy = _geoslib.Polygon(b)
for line in bdatmetafile:
linesplit = line.split()
area = float(linesplit[1])
south = float(linesplit[3])
north = float(linesplit[4])
crossdatelineE=False; crossdatelineW=False
if name == 'gshhs':
id = linesplit[7]
if id.endswith('E'):
crossdatelineE = True
elif id.endswith('W'):
crossdatelineW = True
# make sure south/north limits of dateline crossing polygons
# (Eurasia) are the same, since they will be merged into one.
# (this avoids having one filtered out and not the other).
if crossdatelineE:
south_save=south
north_save=north
if crossdatelineW:
south=south_save
north=north_save
if area < 0.: area = 1.e30
useit = self.latmax>=south and self.latmin<=north and area>self.area_thresh
if useit:
typ = int(linesplit[0])
npts = int(linesplit[2])
offsetbytes = int(linesplit[5])
bytecount = int(linesplit[6])
bdatfile.seek(offsetbytes,0)
                # read in binary string and convert it into an npts by 2
# numpy array (first column is lons, second is lats).
polystring = bdatfile.read(bytecount)
# binary data is little endian.
b = np.array(np.fromstring(polystring,dtype='<f4'),'f8')
b.shape = (npts,2)
b2 = b.copy()
# merge polygons that cross dateline.
poly = Shape(b)
                # hack to try to avoid having the Antarctica filled polygon
                # cover the entire map (if skipAntart = False, this happens
                # for ortho lon_0=-120, lat_0=60, for example).
skipAntart = self.projection in tostere and south < -89 and \
not hasSP
if crossdatelineE and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
polyE = poly
continue
elif crossdatelineW and not skipAntart:
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b[:,0] = b[:,0]+360.
poly = Shape(b)
poly = poly.union(polyE)
if not poly.is_valid(): poly=poly.fix()
b = poly.boundary
b2 = b.copy()
                    # fix Antarctica.
if name == 'gshhs' and south < -89:
b = b[4:,:]
b2 = b.copy()
poly = Shape(b)
# if map boundary polygon is a valid one in lat/lon
# coordinates (i.e. it does not contain either pole),
# the intersections of the boundary geometries
# and the map projection region can be computed before
# transforming the boundary geometry to map projection
# coordinates (this saves time, especially for small map
# regions and high-resolution boundary geometries).
if not containsPole:
# close Antarctica.
if name == 'gshhs' and south < -89:
lons2 = b[:,0]
lats = b[:,1]
lons1 = lons2 - 360.
lons3 = lons2 + 360.
lons = lons1.tolist()+lons2.tolist()+lons3.tolist()
lats = lats.tolist()+lats.tolist()+lats.tolist()
lonstart,latstart = lons[0], lats[0]
lonend,latend = lons[-1], lats[-1]
lons.insert(0,lonstart)
lats.insert(0,-90.)
lons.append(lonend)
lats.append(-90.)
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
poly = Shape(b)
if not poly.is_valid(): poly=poly.fix()
                    # if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
bx, by = self(blons, blats)
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
else:
# create duplicate polygons shifted by -360 and +360
# (so as to properly treat polygons that cross
# Greenwich meridian).
b2[:,0] = b[:,0]-360
poly1 = Shape(b2)
b2[:,0] = b[:,0]+360
poly2 = Shape(b2)
polys = [poly1,poly,poly2]
for poly in polys:
# try to fix "non-noded intersection" errors.
if not poly.is_valid(): poly=poly.fix()
                        # if polygon intersects map projection
# region, process it.
if poly.intersects(boundarypolyll):
if name != 'gshhs' or as_polygons:
geoms = poly.intersection(boundarypolyll)
else:
# convert polygons to line segments
# note: use fix method here or Eurasia
# line segments sometimes disappear.
poly = _geoslib.LineString(poly.fix().boundary)
geoms = poly.intersection(boundarypolyll)
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
blons = b[:,0]; blats = b[:,1]
# transformation from lat/lon to
# map projection coordinates.
bx, by = self(blons, blats)
if not as_polygons or len(bx) > 4:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
# if map boundary polygon is not valid in lat/lon
# coordinates, compute intersection between map
# projection region and boundary geometries in map
# projection coordinates.
else:
# transform coordinates from lat/lon
# to map projection coordinates.
# special case for ortho/gnom/nsper, compute coastline polygon
# vertices in stereographic coords.
if name == 'gshhs' and as_polygons and self.projection in tostere:
b[:,0], b[:,1] = maptran(b[:,0], b[:,1])
else:
b[:,0], b[:,1] = self(b[:,0], b[:,1])
goodmask = np.logical_and(b[:,0]<1.e20,b[:,1]<1.e20)
# if less than two points are valid in
# map proj coords, skip this geometry.
if np.sum(goodmask) <= 1: continue
if name != 'gshhs' or (name == 'gshhs' and not as_polygons):
# if not a polygon,
# just remove parts of geometry that are undefined
# in this map projection.
bx = np.compress(goodmask, b[:,0])
by = np.compress(goodmask, b[:,1])
# split coastline segments that jump across entire plot.
xd = (bx[1:]-bx[0:-1])**2
yd = (by[1:]-by[0:-1])**2
dist = np.sqrt(xd+yd)
split = dist > 0.5*(self.xmax-self.xmin)
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
iprev = 0
ind.append(len(xd))
for i in ind:
# don't add empty lists.
if len(list(range(iprev,i))):
polygons.append(list(zip(bx[iprev:i],by[iprev:i])))
iprev = i
else:
polygons.append(list(zip(bx,by)))
polygon_types.append(typ)
continue
# create a GEOS geometry object.
if name == 'gshhs' and not as_polygons:
# convert polygons to line segments
poly = _geoslib.LineString(poly.boundary)
else:
poly = Shape(b)
# this is a workaround to avoid
# "GEOS_ERROR: TopologyException:
# found non-noded intersection between ..."
if not poly.is_valid(): poly=poly.fix()
                    # if geometry intersects map projection
# region, and doesn't have any invalid points, process it.
if goodmask.all() and poly.intersects(boundarypolyxy):
# if geometry intersection calculation fails,
# just move on.
try:
geoms = poly.intersection(boundarypolyxy)
except:
continue
# iterate over geometries in intersection.
for psub in geoms:
b = psub.boundary
# if projection in ['ortho','gnom','nsper'],
# transform polygon from stereographic
# to ortho/gnom/nsper coordinates.
if self.projection in tostere:
# if coastline polygon covers more than 99%
# of map region for fulldisk projection,
# it's probably bogus, so skip it.
#areafrac = psub.area()/boundarypolyxy.area()
#if self.projection == ['ortho','nsper']:
# if name == 'gshhs' and\
# self._fulldisk and\
# areafrac > 0.99: continue
# inverse transform from stereographic
# to lat/lon.
b[:,0], b[:,1] = maptran(b[:,0], b[:,1], inverse=True)
# orthographic/gnomonic/nsper.
b[:,0], b[:,1]= self(b[:,0], b[:,1])
if not as_polygons or len(b) > 4:
polygons.append(list(zip(b[:,0],b[:,1])))
polygon_types.append(typ)
return polygons, polygon_types
def _getmapboundary(self):
"""
create map boundary polygon (in lat/lon and x/y coordinates)
"""
nx = 100; ny = 100
maptran = self
if self.projection in ['ortho','geos','nsper']:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
# compute proj instance for full disk, if necessary.
if not self._fulldisk:
projparms = self.projparams.copy()
del projparms['x_0']
del projparms['y_0']
if self.projection == 'ortho':
llcrnrx = -self.rmajor
llcrnry = -self.rmajor
urcrnrx = -llcrnrx
urcrnry = -llcrnry
else:
llcrnrx = -self._width
llcrnry = -self._height
urcrnrx = -llcrnrx
urcrnry = -llcrnry
projparms['x_0']=-llcrnrx
projparms['y_0']=-llcrnry
maptran = pyproj.Proj(projparms)
elif self.projection == 'aeqd' and self._fulldisk:
# circular region.
thetas = np.linspace(0.,2.*np.pi,2*nx*ny)[:-1]
rminor = self._height
rmajor = self._width
x = rmajor*np.cos(thetas) + rmajor
y = rminor*np.sin(thetas) + rminor
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
elif self.projection in _pseudocyl:
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
lats1 = np.linspace(-89.9999,89.9999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9,lon_0+179.9,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9]
# bottom.
lons4 = np.linspace(lon_0+179.9,lon_0-179.9,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = maptran(lons,lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else: # all other projections are rectangular.
nx = 100*nx; ny = 100*ny
# left side (x = xmin, ymin <= y <= ymax)
yy = np.linspace(self.ymin, self.ymax, ny)[:-1]
x = len(yy)*[self.xmin]; y = yy.tolist()
# top (y = ymax, xmin <= x <= xmax)
xx = np.linspace(self.xmin, self.xmax, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymax]
# right side (x = xmax, ymin <= y <= ymax)
yy = np.linspace(self.ymax, self.ymin, ny)[:-1]
x = x + len(yy)*[self.xmax]; y = y + yy.tolist()
# bottom (y = ymin, xmin <= x <= xmax)
xx = np.linspace(self.xmax, self.xmin, nx)[:-1]
x = x + xx.tolist()
y = y + len(xx)*[self.ymin]
x = np.array(x,np.float64)
y = np.array(y,np.float64)
b = np.empty((4,2),np.float64)
b[:,0]=[self.xmin,self.xmin,self.xmax,self.xmax]
b[:,1]=[self.ymin,self.ymax,self.ymax,self.ymin]
boundaryxy = _geoslib.Polygon(b)
if self.projection in _cylproj:
# make sure map boundary doesn't quite include pole.
if self.urcrnrlat > 89.9999:
urcrnrlat = 89.9999
else:
urcrnrlat = self.urcrnrlat
if self.llcrnrlat < -89.9999:
llcrnrlat = -89.9999
else:
llcrnrlat = self.llcrnrlat
lons = [self.llcrnrlon, self.llcrnrlon, self.urcrnrlon, self.urcrnrlon]
lats = [llcrnrlat, urcrnrlat, urcrnrlat, llcrnrlat]
self.boundarylonmin = min(lons)
self.boundarylonmax = max(lons)
x, y = self(lons, lats)
b = np.empty((len(x),2),np.float64)
b[:,0]=x; b[:,1]=y
boundaryxy = _geoslib.Polygon(b)
else:
if self.projection not in _pseudocyl:
lons, lats = maptran(x,y,inverse=True)
# fix lons so there are no jumps.
n = 1
lonprev = lons[0]
for lon,lat in zip(lons[1:],lats[1:]):
if np.abs(lon-lonprev) > 90.:
if lonprev < 0:
lon = lon - 360.
else:
lon = lon + 360
lons[n] = lon
lonprev = lon
n = n + 1
self.boundarylonmin = lons.min()
self.boundarylonmax = lons.max()
# for circular full disk projections where boundary is
# a latitude circle, set boundarylonmax and boundarylonmin
# to cover entire world (so parallels will be drawn).
if self._fulldisk and \
np.abs(self.boundarylonmax-self.boundarylonmin) < 1.:
self.boundarylonmin = -180.
self.boundarylonmax = 180.
b = np.empty((len(lons),2),np.float64)
b[:,0] = lons; b[:,1] = lats
boundaryll = _geoslib.Polygon(b)
return lons, lats, boundaryll, boundaryxy
def drawmapboundary(self,color='k',linewidth=1.0,fill_color=None,\
zorder=None,ax=None):
"""
draw boundary around map projection region, optionally
filling interior of region.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth line width for boundary (default 1.)
color color of boundary line (default black)
fill_color fill the map region background with this
color (default is to fill with axis
background color). If set to the string
'none', no filling is done.
zorder sets the zorder for filling map background
(default 0).
ax axes instance to use
(default None, use default axes instance).
============== ====================================================
        returns the matplotlib.patches.Patch object representing the map boundary.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# if no fill_color given, use axes background color.
# if fill_color is string 'none', really don't fill.
if fill_color is None:
fill_color = ax.get_axis_bgcolor()
elif fill_color == 'none' or fill_color == 'None':
fill_color = None
limb = None
if self.projection in ['ortho','geos','nsper'] or (self.projection=='aeqd' and\
self._fulldisk):
limb = Ellipse((self._width,self._height),2.*self._width,2.*self._height)
if self.projection in ['ortho','geos','nsper','aeqd'] and self._fulldisk:
ax.set_frame_on(False)
# elliptical region.
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_edgecolor(color)
limb.set_linewidth(linewidth)
limb.set_clip_on(False)
if zorder is not None:
limb.set_zorder(zorder)
elif self.projection in _pseudocyl: # elliptical region.
ax.set_frame_on(False)
nx = 100; ny = 100
if self.projection == 'vandg':
nx = 10*nx; ny = 10*ny
# quasi-elliptical region.
lon_0 = self.projparams['lon_0']
# left side
lats1 = np.linspace(-89.9999,89.99999,ny).tolist()
lons1 = len(lats1)*[lon_0-179.9]
# top.
lons2 = np.linspace(lon_0-179.9999,lon_0+179.9999,nx).tolist()
lats2 = len(lons2)*[89.9999]
# right side
lats3 = np.linspace(89.9999,-89.9999,ny).tolist()
lons3 = len(lats3)*[lon_0+179.9999]
# bottom.
lons4 = np.linspace(lon_0+179.9999,lon_0-179.9999,nx).tolist()
lats4 = len(lons4)*[-89.9999]
lons = np.array(lons1+lons2+lons3+lons4,np.float64)
lats = np.array(lats1+lats2+lats3+lats4,np.float64)
x, y = self(lons,lats)
xy = list(zip(x,y))
limb = Polygon(xy,edgecolor=color,linewidth=linewidth)
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_clip_on(False)
if zorder is not None:
limb.set_zorder(zorder)
elif self.round:
ax.set_frame_on(False)
limb = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_clip_on(False)
if zorder is not None:
limb.set_zorder(zorder)
else: # all other projections are rectangular.
# use axesPatch for fill_color, spine for border line props.
for spine in ax.spines.values():
spine.set_linewidth(linewidth)
if self.projection not in ['geos','ortho','nsper']:
limb = ax.axesPatch
if fill_color is not None:
limb.set_facecolor(fill_color)
for spine in ax.spines.values():
spine.set_edgecolor(color)
ax.set_frame_on(True)
# FIXME? should zorder be set separately for edge and background?
if zorder is not None:
limb.set_zorder(zorder)
for spine in ax.spines.values():
spine.set_zorder(zorder)
else:
# use axesPatch for fill_color, spine for border line props.
for spine in ax.spines.values():
spine.set_edgecolor(color)
ax.set_frame_on(True)
# FIXME? should zorder be set separately for edge and background?
if zorder is not None:
ax.axesPatch.set_zorder(zorder)
for spine in ax.spines.values():
spine.set_zorder(zorder)
# for geos or ortho projections, also
# draw and fill map projection limb, clipped
# to rectangular region.
ax.add_patch(limb)
self._mapboundarydrawn = limb
if fill_color is None:
limb.set_fill(False)
else:
limb.set_facecolor(fill_color)
limb.set_zorder(0)
limb.set_edgecolor(color)
limb.set_linewidth(linewidth)
if zorder is not None:
limb.set_zorder(zorder)
limb.set_clip_on(True)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return limb
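    # Editor's usage sketch for drawmapboundary (illustrative only):
    #
    #     m.drawmapboundary(fill_color='aqua')   # fill the map background
    #     m.drawmapboundary(fill_color='none')   # draw the outline only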
def fillcontinents(self,color='0.8',lake_color=None,ax=None,zorder=None,alpha=None):
"""
Fill continents.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to fill continents (default gray).
lake_color color to fill inland lakes (default axes background).
ax axes instance (overrides default axes instance).
zorder sets the zorder for the continent polygons (if not
specified, uses default zorder for a Polygon patch).
                       Set to zero if you want to paint over the filled
                       continents.
alpha sets alpha transparency for continent polygons
============== ====================================================
After filling continents, lakes are re-filled with
axis background color.
returns a list of matplotlib.patches.Polygon objects.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# get axis background color.
axisbgc = ax.get_axis_bgcolor()
npoly = 0
polys = []
for x,y in self.coastpolygons:
xa = np.array(x,np.float32)
ya = np.array(y,np.float32)
# check to see if all four corners of domain in polygon (if so,
# don't draw since it will just fill in the whole map).
# ** turn this off for now since it prevents continents that
# fill the whole map from being filled **
#delx = 10; dely = 10
#if self.projection in ['cyl']:
# delx = 0.1
# dely = 0.1
#test1 = np.fabs(xa-self.urcrnrx) < delx
#test2 = np.fabs(xa-self.llcrnrx) < delx
#test3 = np.fabs(ya-self.urcrnry) < dely
#test4 = np.fabs(ya-self.llcrnry) < dely
#hasp1 = np.sum(test1*test3)
#hasp2 = np.sum(test2*test3)
#hasp4 = np.sum(test2*test4)
#hasp3 = np.sum(test1*test4)
#if not hasp1 or not hasp2 or not hasp3 or not hasp4:
if 1:
xy = list(zip(xa.tolist(),ya.tolist()))
if self.coastpolygontypes[npoly] not in [2,4]:
poly = Polygon(xy,facecolor=color,edgecolor=color,linewidth=0)
else: # lakes filled with background color by default
if lake_color is None:
poly = Polygon(xy,facecolor=axisbgc,edgecolor=axisbgc,linewidth=0)
else:
poly = Polygon(xy,facecolor=lake_color,edgecolor=lake_color,linewidth=0)
if zorder is not None:
poly.set_zorder(zorder)
if alpha is not None:
poly.set_alpha(alpha)
ax.add_patch(poly)
polys.append(poly)
npoly = npoly + 1
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip continent polygons for round polar plots.
if self.round: polys,c = self._clipcircle(ax,polys)
return polys
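    # Editor's usage sketch for fillcontinents (illustrative only): lakes take
    # the axes background color unless lake_color is given, so a common
    # pattern is to match lake_color to the drawmapboundary fill:
    #
    #     m.drawmapboundary(fill_color='aqua')
    #     m.fillcontinents(color='coral', lake_color='aqua')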
def _clipcircle(self,ax,coll):
c = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
if c not in ax.patches:
p = ax.add_patch(c)
p.set_clip_on(False)
try:
coll.set_clip_path(c)
except:
for item in coll:
item.set_clip_path(c)
return coll,c
def drawcoastlines(self,linewidth=1.,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw coastlines.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth coastline width (default 1.)
linestyle coastline linestyle (default solid)
color coastline color (default black)
antialiased antialiasing switch for coastlines (default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the coastlines (if not specified,
uses default zorder for
                       matplotlib.collections.LineCollection objects).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
coastlines = LineCollection(self.coastsegs,antialiaseds=(antialiased,))
coastlines.set_color(color)
coastlines.set_linestyle(linestyle)
coastlines.set_linewidth(linewidth)
coastlines.set_label('_nolabel_')
if zorder is not None:
coastlines.set_zorder(zorder)
# clip coastlines for round polar plots.
if self.round: coastlines,c = self._clipcircle(ax,coastlines)
ax.add_collection(coastlines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return coastlines
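    # Editor's usage sketch for drawcoastlines (illustrative only); the same
    # call pattern applies to drawcountries, drawstates and drawrivers below:
    #
    #     m.drawcoastlines(linewidth=0.5, color='gray')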
def drawcountries(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw country boundaries.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth country boundary line width (default 0.5)
        linestyle      country boundary linestyle (default solid)
color country boundary line color (default black)
antialiased antialiasing switch for country boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the country boundaries (if not
specified uses default zorder for
                       matplotlib.collections.LineCollection objects).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in country line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'cntrysegs'):
self.cntrysegs, types = self._readboundarydata('countries')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
countries = LineCollection(self.cntrysegs,antialiaseds=(antialiased,))
countries.set_color(color)
countries.set_linestyle(linestyle)
countries.set_linewidth(linewidth)
countries.set_label('_nolabel_')
if zorder is not None:
countries.set_zorder(zorder)
ax.add_collection(countries)
# clip countries for round polar plots.
if self.round: countries,c = self._clipcircle(ax,countries)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return countries
def drawstates(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw state boundaries in Americas.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth state boundary line width (default 0.5)
        linestyle      state boundary linestyle (default solid)
color state boundary line color (default black)
antialiased antialiasing switch for state boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the state boundaries (if not
specified, uses default zorder for
                       matplotlib.collections.LineCollection objects).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in state line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'statesegs'):
self.statesegs, types = self._readboundarydata('states')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
states = LineCollection(self.statesegs,antialiaseds=(antialiased,))
states.set_color(color)
states.set_linestyle(linestyle)
states.set_linewidth(linewidth)
states.set_label('_nolabel_')
if zorder is not None:
states.set_zorder(zorder)
ax.add_collection(states)
# clip states for round polar plots.
if self.round: states,c = self._clipcircle(ax,states)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return states
def drawcounties(self,linewidth=0.1,linestyle='solid',color='k',antialiased=1,
ax=None,zorder=None,drawbounds=False):
"""
Draw county boundaries in US. The county boundary shapefile
originates with the NOAA Coastal Geospatial Data Project
(http://coastalgeospatial.noaa.gov/data_gis.html).
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth county boundary line width (default 0.1)
        linestyle      county boundary linestyle (default solid)
color county boundary line color (default black)
antialiased antialiasing switch for county boundaries
(default True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the county boundaries (if not
specified, uses default zorder for
                       matplotlib.collections.LineCollection objects).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
ax = ax or self._check_ax()
gis_file = os.path.join(basemap_datadir,'UScounties')
county_info = self.readshapefile(gis_file,'counties',\
default_encoding='latin-1',drawbounds=drawbounds)
counties = [coords for coords in self.counties]
counties = LineCollection(counties)
counties.set_linestyle(linestyle)
counties.set_linewidth(linewidth)
counties.set_color(color)
counties.set_label('counties')
if zorder:
counties.set_zorder(zorder)
ax.add_collection(counties)
return counties
def drawrivers(self,linewidth=0.5,linestyle='solid',color='k',antialiased=1,ax=None,zorder=None):
"""
Draw major rivers.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
linewidth river boundary line width (default 0.5)
        linestyle      river boundary linestyle (default solid)
color river boundary line color (default black)
antialiased antialiasing switch for river boundaries (default
True).
ax axes instance (overrides default axes instance)
zorder sets the zorder for the rivers (if not
specified uses default zorder for
                       matplotlib.collections.LineCollection objects).
============== ====================================================
        returns a matplotlib.collections.LineCollection object.
"""
if self.resolution is None:
raise AttributeError('there are no boundary datasets associated with this Basemap instance')
# read in river line segments, only keeping those that
# intersect map boundary polygon.
if not hasattr(self,'riversegs'):
self.riversegs, types = self._readboundarydata('rivers')
# get current axes instance (if none specified).
ax = ax or self._check_ax()
rivers = LineCollection(self.riversegs,antialiaseds=(antialiased,))
rivers.set_color(color)
rivers.set_linestyle(linestyle)
rivers.set_linewidth(linewidth)
rivers.set_label('_nolabel_')
if zorder is not None:
rivers.set_zorder(zorder)
ax.add_collection(rivers)
# clip rivers for round polar plots.
if self.round: rivers,c = self._clipcircle(ax,rivers)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return rivers
def is_land(self,xpt,ypt):
"""
Returns True if the given x,y point (in projection coordinates) is
over land, False otherwise. The definition of land is based upon
the GSHHS coastline polygons associated with the class instance.
Points over lakes inside land regions are not counted as land points.
"""
if self.resolution is None: return None
landpt = False
for poly in self.landpolygons:
landpt = _geoslib.Point((xpt,ypt)).within(poly)
if landpt: break
lakept = False
for poly in self.lakepolygons:
lakept = _geoslib.Point((xpt,ypt)).within(poly)
if lakept: break
return landpt and not lakept
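    # Editor's usage sketch for is_land (illustrative only): the point must be
    # given in projection coordinates, so transform lon/lat first.  Requires a
    # Basemap instance created with resolution != None.
    #
    #     x, y = m(-105.0, 40.0)
    #     print(m.is_land(x, y))   # True over land, False over ocean or lakes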
def readshapefile(self,shapefile,name,drawbounds=True,zorder=None,
linewidth=0.5,color='k',antialiased=1,ax=None,
default_encoding='utf-8'):
"""
Read in shape file, optionally draw boundaries on map.
.. note::
- Assumes shapes are 2D
- only works for Point, MultiPoint, Polyline and Polygon shapes.
- vertices/points must be in geographic (lat/lon) coordinates.
Mandatory Arguments:
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
shapefile path to shapefile components. Example:
shapefile='/home/jeff/esri/world_borders' assumes
that world_borders.shp, world_borders.shx and
world_borders.dbf live in /home/jeff/esri.
name name for Basemap attribute to hold the shapefile
vertices or points in map projection
coordinates. Class attribute name+'_info' is a list
of dictionaries, one for each shape, containing
                       attributes of each shape from the dbf file. For
example, if name='counties', self.counties
will be a list of x,y vertices for each shape in
map projection coordinates and self.counties_info
will be a list of dictionaries with shape
attributes. Rings in individual Polygon
shapes are split out into separate polygons, and
additional keys 'RINGNUM' and 'SHAPENUM' are added
to the shape attribute dictionary.
============== ====================================================
The following optional keyword arguments are only relevant for Polyline
and Polygon shape types, for Point and MultiPoint shapes they are
ignored.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
drawbounds draw boundaries of shapes (default True).
zorder shape boundary zorder (if not specified,
                       default for matplotlib.collections.LineCollection
is used).
linewidth shape boundary line width (default 0.5)
color shape boundary line color (default black)
antialiased antialiasing switch for shape boundaries
(default True).
ax axes instance (overrides default axes instance)
============== ====================================================
A tuple (num_shapes, type, min, max) containing shape file info
is returned.
num_shapes is the number of shapes, type is the type code (one of
the SHPT* constants defined in the shapelib module, see
http://shapelib.maptools.org/shp_api.html) and min and
max are 4-element lists with the minimum and maximum values of the
vertices. If ``drawbounds=True`` a
        matplotlib.collections.LineCollection object is appended to the tuple.
"""
import shapefile as shp
from .shapefile import Reader
shp.default_encoding = default_encoding
if not os.path.exists('%s.shp'%shapefile):
raise IOError('cannot locate %s.shp'%shapefile)
if not os.path.exists('%s.shx'%shapefile):
raise IOError('cannot locate %s.shx'%shapefile)
if not os.path.exists('%s.dbf'%shapefile):
raise IOError('cannot locate %s.dbf'%shapefile)
# open shapefile, read vertices for each object, convert
# to map projection coordinates (only works for 2D shape types).
try:
shf = Reader(shapefile)
except:
raise IOError('error reading shapefile %s.shp' % shapefile)
fields = shf.fields
coords = []; attributes = []
msg=dedent("""
shapefile must have lat/lon vertices - it looks like this one has vertices
in map projection coordinates. You can convert the shapefile to geographic
coordinates using the shpproj utility from the shapelib tools
(http://shapelib.maptools.org/shapelib-tools.html)""")
shptype = shf.shapes()[0].shapeType
bbox = shf.bbox.tolist()
info = (shf.numRecords,shptype,bbox[0:2]+[0.,0.],bbox[2:]+[0.,0.])
npoly = 0
for shprec in shf.shapeRecords():
shp = shprec.shape; rec = shprec.record
npoly = npoly + 1
if shptype != shp.shapeType:
raise ValueError('readshapefile can only handle a single shape type per file')
if shptype not in [1,3,5,8]:
raise ValueError('readshapefile can only handle 2D shape types')
verts = shp.points
if shptype in [1,8]: # a Point or MultiPoint shape.
lons, lats = list(zip(*verts))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
if len(verts) > 1: # MultiPoint
x,y = self(lons, lats)
coords.append(list(zip(x,y)))
else: # single Point
x,y = self(lons[0], lats[0])
coords.append((x,y))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
attributes.append(attdict)
else: # a Polyline or Polygon shape.
parts = shp.parts.tolist()
ringnum = 0
for indx1,indx2 in zip(parts,parts[1:]+[len(verts)]):
ringnum = ringnum + 1
lons, lats = list(zip(*verts[indx1:indx2]))
if max(lons) > 721. or min(lons) < -721. or max(lats) > 90.01 or min(lats) < -90.01:
raise ValueError(msg)
# if latitude is slightly greater than 90, truncate to 90
lats = [max(min(lat, 90.0), -90.0) for lat in lats]
x, y = self(lons, lats)
coords.append(list(zip(x,y)))
attdict={}
for r,key in zip(rec,fields[1:]):
attdict[key[0]]=r
# add information about ring number to dictionary.
attdict['RINGNUM'] = ringnum
attdict['SHAPENUM'] = npoly
attributes.append(attdict)
# draw shape boundaries for polylines, polygons using LineCollection.
if shptype not in [1,8] and drawbounds:
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# make LineCollections for each polygon.
lines = LineCollection(coords,antialiaseds=(1,))
lines.set_color(color)
lines.set_linewidth(linewidth)
lines.set_label('_nolabel_')
if zorder is not None:
lines.set_zorder(zorder)
ax.add_collection(lines)
# clip boundaries for round polar plots.
if self.round: lines,c = self._clipcircle(ax,lines)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
info = info + (lines,)
self.__dict__[name]=coords
self.__dict__[name+'_info']=attributes
return info
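    # Editor's usage sketch for readshapefile (illustrative only); the path
    # and attribute name below are hypothetical placeholders:
    #
    #     info = m.readshapefile('/path/to/world_borders', 'borders',
    #                            drawbounds=True, color='k', linewidth=0.5)
    #     for xy, attrs in zip(m.borders, m.borders_info):
    #         pass   # xy: projected vertices of one shape, attrs: dict of dbf fields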
def drawparallels(self,circles,color='k',linewidth=1.,zorder=None, \
dashes=[1,1],labels=[0,0,0,0],labelstyle=None, \
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**kwargs):
"""
Draw and label parallels (latitude lines) for values (in degrees)
given in the sequence ``circles``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw parallels (default black).
linewidth line width for parallels (default 1.)
zorder sets the zorder for parallels (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for parallels (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether parallels are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause parallels
                       to be labelled where they intersect the left and
                       bottom of the plot, but not the right and top.
labelstyle if set to "+/-", north and south latitudes are
labelled with "+" and "-", otherwise they are
labelled with "N" and "S".
fmt a format string to format the parallel labels
(default '%g') **or** a function that takes a
                       latitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
latmax absolute value of latitude to which meridians are drawn
(default is 80).
\**kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the parallel values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each parallel. Deleting an item from the
dictionary removes the corresponding parallel from the plot.
"""
# if celestial=True, don't use "N" and "S" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
if self.projection in _cylproj + _pseudocyl:
lons = np.linspace(self.llcrnrlon, self.urcrnrlon, 10001)
elif self.projection in ['tmerc']:
lon_0 = self.projparams['lon_0']
# tmerc only defined within +/- 90 degrees of lon_0
lons = np.linspace(lon_0-90,lon_0+90,100001)
else:
lonmin = self.boundarylonmin; lonmax = self.boundarylonmax
lons = np.linspace(lonmin, lonmax, 10001)
# make sure latmax degree parallel is drawn if projection not merc or cyl or miller
try:
circlesl = list(circles)
except:
circlesl = circles
if self.projection not in _cylproj + _pseudocyl:
if max(circlesl) > 0 and latmax not in circlesl:
circlesl.append(latmax)
if min(circlesl) < 0 and -latmax not in circlesl:
circlesl.append(-latmax)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for circ in circlesl:
lats = circ*np.ones(len(lons),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
            # don't really know why, but this appears to be needed
            # or lines sometimes don't reach the edge of the plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
                # (not necessary for cylindrical or pseudocylindrical projections)
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[circ] = (lines,[])
# draw labels for parallels
# parallels not labelled for fulldisk orthographic or geostationary
if self.projection in ['ortho','geos','nsper','vandg','aeqd'] and max(labels):
if self.projection == 'vandg' or self._fulldisk:
sys.stdout.write('Warning: Cannot label parallels on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
# search along edges of map to see if parallels intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab: continue
            # for cylindrical projections, don't label parallels on the top or bottom edges.
if self.projection in _cylproj + _pseudocyl and side in ['t','b']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
if self.projection in _pseudocyl:
                        lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.llcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
if self.projection in _pseudocyl:
                        lats = np.linspace(-89.99,89.99,nmax)
if self.celestial:
lons = (self.projparams['lon_0']-180.)*np.ones(len(lats),lats.dtype)
else:
lons = (self.projparams['lon_0']+180.)*np.ones(len(lats),lats.dtype)
xx, yy = self(lons, lats)
else:
xx = self.urcrnrx*np.ones(yy.shape,yy.dtype)
lons,lats = self(xx,yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lat in circles:
# don't label parallels for round polar plots
if self.round: continue
# find index of parallel (there may be two, so
# search from left and right).
nl = _searchlist(lats,lat)
nr = _searchlist(lats[::-1],lat)
if nr != -1: nr = len(lons)-nr-1
latlab = _setlatlab(fmt,lat,labelstyle)
# parallels can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab = self.llcrnrx
xlab = xlab-xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='bottom',**kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='top',**kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='right',verticalalignment='center',**kwargs)
elif side == 'r':
if self.projection in _pseudocyl:
if self.celestial:
xlab,ylab = self(lon_0-179.9,lat)
else:
xlab,ylab = self(lon_0+179.9,lat)
else:
xlab = self.urcrnrx
xlab = xlab+xoffset
if self.projection in _pseudocyl:
if lat>0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='bottom',**kwargs)
elif lat<0:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='top',**kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**kwargs)
else:
t=ax.text(xlab,yy[n],latlab,horizontalalignment='left',verticalalignment='center',**kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,latlab,horizontalalignment='center',verticalalignment='top',**kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,latlab,horizontalalignment='center',verticalalignment='bottom',**kwargs)
if t is not None: linecolls[lat][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
# add a remove method to each tuple.
else:
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
pardict = _dict(linecolls)
# clip parallels for round polar plots (and delete labels).
if self.round:
c = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
if c not in ax.patches:
p = ax.add_patch(c)
p.set_clip_on(False)
for par in pardict:
lines,labs = pardict[par]
for l in lines:
l.set_clip_path(c)
return pardict
def drawmeridians(self,meridians,color='k',linewidth=1., zorder=None,\
dashes=[1,1],labels=[0,0,0,0],labelstyle=None,\
fmt='%g',xoffset=None,yoffset=None,ax=None,latmax=None,
**kwargs):
"""
Draw and label meridians (longitude lines) for values (in degrees)
given in the sequence ``meridians``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
color color to draw meridians (default black).
linewidth line width for meridians (default 1.)
zorder sets the zorder for meridians (if not specified,
uses default zorder for matplotlib.lines.Line2D
objects).
dashes dash pattern for meridians (default [1,1], i.e.
1 pixel on, 1 pixel off).
labels list of 4 values (default [0,0,0,0]) that control
whether meridians are labelled where they intersect
the left, right, top or bottom of the plot. For
example labels=[1,0,0,1] will cause meridians
                       to be labelled where they intersect the left
and bottom of the plot, but not the right and top.
labelstyle if set to "+/-", east and west longitudes are
labelled with "+" and "-", otherwise they are
labelled with "E" and "W".
fmt a format string to format the meridian labels
(default '%g') **or** a function that takes a
                       longitude value in degrees as its only argument
and returns a formatted string.
xoffset label offset from edge of map in x-direction
(default is 0.01 times width of map in map
projection coordinates).
yoffset label offset from edge of map in y-direction
(default is 0.01 times height of map in map
projection coordinates).
ax axes instance (overrides default axes instance)
latmax absolute value of latitude to which meridians are drawn
(default is 80).
\**kwargs additional keyword arguments controlling text
for labels that are passed on to
the text method of the axes instance (see
matplotlib.pyplot.text documentation).
============== ====================================================
returns a dictionary whose keys are the meridian values, and
whose values are tuples containing lists of the
matplotlib.lines.Line2D and matplotlib.text.Text instances
associated with each meridian. Deleting an item from the
        dictionary removes the corresponding meridian from the plot.
"""
# for cylindrical projections, try to handle wraparound (i.e. if
# projection is defined in -180 to 0 and user asks for meridians from
# 180 to 360 to be drawn, it should work)
if self.projection in _cylproj or self.projection in _pseudocyl:
def addlon(meridians,madd):
minside = (madd >= self.llcrnrlon and madd <= self.urcrnrlon)
if minside and madd not in meridians: meridians.append(madd)
return meridians
merids = list(meridians)
meridians = []
for m in merids:
meridians = addlon(meridians,m)
meridians = addlon(meridians,m+360)
meridians = addlon(meridians,m-360)
meridians.sort()
# if celestial=True, don't use "E" and "W" labels.
if labelstyle is None and self.celestial:
labelstyle="+/-"
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# don't draw meridians past latmax, always draw parallel at latmax.
if latmax is None: latmax = 80. # unused w/ cyl, merc or miller proj.
# offset for labels.
if yoffset is None:
yoffset = (self.urcrnry-self.llcrnry)/100.
if self.aspect > 1:
yoffset = self.aspect*yoffset
else:
yoffset = yoffset/self.aspect
if xoffset is None:
xoffset = (self.urcrnrx-self.llcrnrx)/100.
lats = np.linspace(self.latmin,self.latmax,10001)
if self.projection not in _cylproj + _pseudocyl:
testlat = np.logical_and(lats>-latmax,lats<latmax)
lats = np.compress(testlat,lats)
xdelta = 0.01*(self.xmax-self.xmin)
ydelta = 0.01*(self.ymax-self.ymin)
linecolls = {}
for merid in meridians:
lons = merid*np.ones(len(lats),np.float32)
x,y = self(lons,lats)
# remove points outside domain.
# leave a little slop around edges (3*xdelta)
            # not entirely clear why, but without this slop lines
            # sometimes don't reach the edge of the plot.
testx = np.logical_and(x>=self.xmin-3*xdelta,x<=self.xmax+3*xdelta)
x = np.compress(testx, x)
y = np.compress(testx, y)
testy = np.logical_and(y>=self.ymin-3*ydelta,y<=self.ymax+3*ydelta)
x = np.compress(testy, x)
y = np.compress(testy, y)
lines = []
if len(x) > 1 and len(y) > 1:
# split into separate line segments if necessary.
# (not necessary for mercator or cylindrical or miller).
xd = (x[1:]-x[0:-1])**2
yd = (y[1:]-y[0:-1])**2
dist = np.sqrt(xd+yd)
if self.projection not in ['cyl','rotpole']:
split = dist > self.rmajor/10.
else:
split = dist > 1.
if np.sum(split) and self.projection not in _cylproj:
ind = (np.compress(split,np.squeeze(split*np.indices(xd.shape)))+1).tolist()
xl = []
yl = []
iprev = 0
ind.append(len(xd))
for i in ind:
xl.append(x[iprev:i])
yl.append(y[iprev:i])
iprev = i
else:
xl = [x]
yl = [y]
# draw each line segment.
for x,y in zip(xl,yl):
# skip if only a point.
if len(x) > 1 and len(y) > 1:
l = Line2D(x,y,linewidth=linewidth)
l.set_color(color)
l.set_dashes(dashes)
l.set_label('_nolabel_')
if zorder is not None:
l.set_zorder(zorder)
ax.add_line(l)
lines.append(l)
linecolls[merid] = (lines,[])
# draw labels for meridians.
# meridians not labelled for sinusoidal, hammer, mollweide,
# VanDerGrinten or full-disk orthographic/geostationary.
if self.projection in ['sinu','moll','hammer','vandg'] and max(labels):
sys.stdout.write('Warning: Cannot label meridians on %s basemap' % _projnames[self.projection])
labels = [0,0,0,0]
if self.projection in ['ortho','geos','nsper','aeqd'] and max(labels):
if self._fulldisk and self.boundinglat is None:
sys.stdout.write(dedent(
"""'Warning: Cannot label meridians on full-disk
Geostationary, Orthographic or Azimuthal equidistant basemap
"""))
labels = [0,0,0,0]
        # search along edges of map to see if meridians intersect.
# if so, find x,y location of intersection and draw a label there.
dx = (self.xmax-self.xmin)/1000.
dy = (self.ymax-self.ymin)/1000.
if self.projection in _pseudocyl:
lon_0 = self.projparams['lon_0']
xmin,ymin = self(lon_0-179.9,-90)
xmax,ymax = self(lon_0+179.9,90)
for dolab,side in zip(labels,['l','r','t','b']):
if not dolab or self.round: continue
# for cylindrical projections, don't draw meridians on left or right.
if self.projection in _cylproj + _pseudocyl and side in ['l','r']: continue
if side in ['l','r']:
nmax = int((self.ymax-self.ymin)/dy+1)
yy = np.linspace(self.llcrnry,self.urcrnry,nmax)
if side == 'l':
lons,lats = self(self.llcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(self.urcrnrx*np.ones(yy.shape,np.float32),yy,inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
else:
nmax = int((self.xmax-self.xmin)/dx+1)
if self.projection in _pseudocyl:
xx = np.linspace(xmin,xmax,nmax)
else:
xx = np.linspace(self.llcrnrx,self.urcrnrx,nmax)
if side == 'b':
lons,lats = self(xx,self.llcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
else:
lons,lats = self(xx,self.urcrnry*np.ones(xx.shape,np.float32),inverse=True)
lons = lons.tolist(); lats = lats.tolist()
if max(lons) > 1.e20 or max(lats) > 1.e20:
raise ValueError('inverse transformation undefined - please adjust the map projection region')
# adjust so 0 <= lons < 360
lons = [(lon+360) % 360 for lon in lons]
for lon in meridians:
# adjust so 0 <= lon < 360
lon2 = (lon+360) % 360
# find index of meridian (there may be two, so
# search from left and right).
nl = _searchlist(lons,lon2)
nr = _searchlist(lons[::-1],lon2)
if nr != -1: nr = len(lons)-nr-1
lonlab = _setlonlab(fmt,lon2,labelstyle)
# meridians can intersect each map edge twice.
for i,n in enumerate([nl,nr]):
lat = lats[n]/100.
# no meridians > latmax for projections other than merc,cyl,miller.
if self.projection not in _cylproj and lat > latmax: continue
# don't bother if close to the first label.
if i and abs(nr-nl) < 100: continue
if n >= 0:
t = None
if side == 'l':
t = ax.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',**kwargs)
elif side == 'r':
t = ax.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',**kwargs)
elif side == 'b':
t = ax.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',**kwargs)
else:
t = ax.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',**kwargs)
if t is not None: linecolls[lon][1].append(t)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# remove empty values from linecolls dictionary
keys = list(linecolls.keys()); vals = list(linecolls.values())
for k,v in zip(keys,vals):
if v == ([], []):
del linecolls[k]
else:
# add a remove method to each tuple.
linecolls[k] = _tup(linecolls[k])
# override __delitem__ in dict to call remove() on values.
meridict = _dict(linecolls)
# for round polar plots, clip meridian lines and label them.
if self.round:
c = Circle((0.5*(self.xmax+self.xmin),0.5*(self.ymax+self.ymin)),
radius=0.5*(self.xmax-self.xmin),fc='none')
if c not in ax.patches:
p = ax.add_patch(c)
p.set_clip_on(False)
# label desired?
label = False
for lab in labels:
if lab: label = True
for merid in meridict:
lines,labs = meridict[merid]
# clip lines.
for l in lines:
l.set_clip_path(c)
if not label: continue
# label
lonlab = _setlonlab(fmt,merid,labelstyle)
x,y = self(merid,self.boundinglat)
r = np.sqrt((x-0.5*(self.xmin+self.xmax))**2+
(y-0.5*(self.ymin+self.ymax))**2)
r = r + np.sqrt(xoffset**2+yoffset**2)
if self.projection.startswith('np'):
pole = 1
elif self.projection.startswith('sp'):
pole = -1
elif self.projection == 'ortho' and self.round:
pole = 1
if pole == 1:
theta = (np.pi/180.)*(merid-self.projparams['lon_0']-90)
if self.projection == 'ortho' and\
self.projparams['lat_0'] == -90:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
elif x < 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
elif y < 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x >= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x <= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y <= 0.5*(self.ymin+self.ymax)-yoffset: continue
                    if labels[3] and not labels[2] and y >= 0.5*(self.ymin+self.ymax)+yoffset: continue
elif pole == -1:
theta = (np.pi/180.)*(-merid+self.projparams['lon_0']+90)
x = r*np.cos(theta)+0.5*(self.xmin+self.xmax)
y = r*np.sin(theta)+0.5*(self.ymin+self.ymax)
if x > 0.5*(self.xmin+self.xmax)-xoffset:
horizalign = 'right'
elif x < 0.5*(self.xmin+self.xmax)+xoffset:
horizalign = 'left'
else:
horizalign = 'center'
if y > 0.5*(self.ymin+self.ymax)-yoffset:
vertalign = 'top'
elif y < 0.5*(self.ymin+self.ymax)+yoffset:
vertalign = 'bottom'
else:
vertalign = 'center'
# labels [l,r,t,b]
if labels[0] and not labels[1] and x <= 0.5*(self.xmin+self.xmax)+xoffset: continue
if labels[1] and not labels[0] and x >= 0.5*(self.xmin+self.xmax)-xoffset: continue
if labels[2] and not labels[3] and y >= 0.5*(self.ymin+self.ymax)-yoffset: continue
if labels[3] and not labels[2] and y <= 0.5*(self.ymin+self.ymax)+yoffset: continue
t =\
ax.text(x,y,lonlab,horizontalalignment=horizalign,verticalalignment=vertalign,**kwargs)
meridict[merid][1].append(t)
return meridict
def tissot(self,lon_0,lat_0,radius_deg,npts,ax=None,**kwargs):
"""
Draw a polygon centered at ``lon_0,lat_0``. The polygon
approximates a circle on the surface of the earth with radius
``radius_deg`` degrees latitude along longitude ``lon_0``,
made up of ``npts`` vertices.
The polygon represents a Tissot's indicatrix
(http://en.wikipedia.org/wiki/Tissot's_Indicatrix),
which when drawn on a map shows the distortion
inherent in the map projection.
.. note::
Cannot handle situations in which the polygon intersects
the edge of the map projection domain, and then re-enters the domain.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.patches.Polygon.
returns a matplotlib.patches.Polygon object."""
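        # Usage sketch (illustrative; ``m`` is an assumed Basemap instance): draw a
        # Tissot indicatrix of 5 degrees radius approximated by a 100-vertex polygon:
        #   m.tissot(lon_0=0., lat_0=45., radius_deg=5., npts=100,
        #            facecolor='green', alpha=0.5)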
ax = kwargs.pop('ax', None) or self._check_ax()
g = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = g.inv(lon_0,lat_0,lon_0,lat_0+radius_deg)
seg = [self(lon_0,lat_0+radius_deg)]
delaz = 360./npts
az = az12
for n in range(npts):
az = az+delaz
lon, lat, az21 = g.fwd(lon_0, lat_0, az, dist)
x,y = self(lon,lat)
# add segment if it is in the map projection region.
if x < 1.e20 and y < 1.e20:
seg.append((x,y))
poly = Polygon(seg,**kwargs)
ax.add_patch(poly)
# clip polygons for round polar plots.
if self.round: poly,c = self._clipcircle(ax,poly)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return poly
def gcpoints(self,lon1,lat1,lon2,lat2,npoints):
"""
        compute ``npoints`` points along a great circle with endpoints
``(lon1,lat1)`` and ``(lon2,lat2)``.
Returns arrays x,y with map projection coordinates.
"""
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints-2)
lons=[lon1];lats=[lat1]
for lon,lat in lonlats:
lons.append(lon); lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
return x,y
def drawgreatcircle(self,lon1,lat1,lon2,lat2,del_s=100.,**kwargs):
"""
Draw a great circle on the map from the longitude-latitude
pair ``lon1,lat1`` to ``lon2,lat2``
.. tabularcolumns:: |l|L|
============== =======================================================
Keyword Description
============== =======================================================
del_s points on great circle computed every del_s kilometers
(default 100).
\**kwargs other keyword arguments are passed on to :meth:`plot`
method of Basemap instance.
============== =======================================================
.. note::
Cannot handle situations in which the great circle intersects
the edge of the map projection domain, and then re-enters the domain.
Returns a matplotlib.lines.Line2D object.
"""
# use great circle formula for a perfect sphere.
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = gc.inv(lon1,lat1,lon2,lat2)
npoints = int((dist+0.5*1000.*del_s)/(1000.*del_s))
lonlats = gc.npts(lon1,lat1,lon2,lat2,npoints)
lons = [lon1]; lats = [lat1]
for lon, lat in lonlats:
lons.append(lon)
lats.append(lat)
lons.append(lon2); lats.append(lat2)
x, y = self(lons, lats)
return self.plot(x,y,**kwargs)
def transform_scalar(self,datin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Interpolate a scalar field (``datin``) from a lat/lon grid with
longitudes = ``lons`` and latitudes = ``lats`` to a ``ny`` by ``nx``
map projection grid. Typically used to transform data to
map projection coordinates for plotting on a map with
        the :meth:`imshow` method.
.. tabularcolumns:: |l|L|
============== ====================================================
Argument Description
============== ====================================================
datin input data on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``datout`` (data on map projection grid).
If returnxy=True, returns ``data,x,y``.
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
if returnxy:
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
else:
lonsout, latsout = self.makegrid(nx,ny)
datout = interp(datin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
if returnxy:
return datout, x, y
else:
return datout
def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,checkbounds=False,order=1,masked=False):
"""
Rotate and interpolate a vector field (``uin,vin``) from a
lat/lon grid with longitudes = ``lons`` and latitudes = ``lats``
to a ``ny`` by ``nx`` map projection grid.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats rank-1 arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
nx, ny The size of the output regular grid in map
projection coordinates
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keyword Description
============== ====================================================
returnxy If True, the x and y values of the map
projection grid are also returned (Default False).
checkbounds If True, values of lons and lats are checked to see
that they lie within the map projection region.
Default is False, and data outside map projection
region is clipped to values on boundary.
masked If True, interpolated data is returned as a masked
array with values outside map projection region
masked (Default False).
order 0 for nearest-neighbor interpolation, 1 for
bilinear, 3 for cubic spline (Default 1).
Cubic spline interpolation requires scipy.ndimage.
============== ====================================================
Returns ``uout, vout`` (vector field on map projection grid).
If returnxy=True, returns ``uout,vout,x,y``.
"""
# check that lons, lats increasing
delon = lons[1:]-lons[0:-1]
delat = lats[1:]-lats[0:-1]
if min(delon) < 0. or min(delat) < 0.:
raise ValueError('lons and lats must be increasing!')
# check that lons in -180,180 for non-cylindrical projections.
if self.projection not in _cylproj:
lonsa = np.array(lons)
count = np.sum(lonsa < -180.00001) + np.sum(lonsa > 180.00001)
if count > 1:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
# allow for wraparound point to be outside.
elif count == 1 and math.fabs(lons[-1]-lons[0]-360.) > 1.e-4:
raise ValueError('grid must be shifted so that lons are monotonically increasing and fit in range -180,+180 (see shiftgrid function)')
lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
# interpolate to map projection coordinates.
uin = interp(uin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
vin = interp(vin,lons,lats,lonsout,latsout,checkbounds=checkbounds,order=order,masked=masked)
# rotate from geographic to map coordinates.
return self.rotate_vector(uin,vin,lonsout,latsout,returnxy=returnxy)
def rotate_vector(self,uin,vin,lons,lats,returnxy=False):
"""
Rotate a vector field (``uin,vin``) on a rectilinear grid
with longitudes = ``lons`` and latitudes = ``lats`` from
geographical (lat/lon) into map projection (x/y) coordinates.
Differs from transform_vector in that no interpolation is done.
The vector is returned on the same grid, but rotated into
x,y coordinates.
The input vector field is defined in spherical coordinates (it
has eastward and northward components) while the output
vector field is rotated to map projection coordinates (relative
to x and y). The magnitude of the vector is preserved.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
uin, vin input vector field on a lat/lon grid.
lons, lats Arrays containing longitudes and latitudes
(in degrees) of input data in increasing order.
For non-cylindrical projections (those other than
                       ``cyl``, ``merc``, ``cea``, ``gall`` and ``mill``) lons
must fit within range -180 to 180.
============== ====================================================
Returns ``uout, vout`` (rotated vector field).
If the optional keyword argument
``returnxy`` is True (default is False),
returns ``uout,vout,x,y`` (where ``x,y`` are the map projection
coordinates of the grid defined by ``lons,lats``).
"""
# if lons,lats are 1d and uin,vin are 2d, and
# lats describes 1st dim of uin,vin, and
# lons describes 2nd dim of uin,vin, make lons,lats 2d
# with meshgrid.
if lons.ndim == lats.ndim == 1 and uin.ndim == vin.ndim == 2 and\
uin.shape[1] == vin.shape[1] == lons.shape[0] and\
uin.shape[0] == vin.shape[0] == lats.shape[0]:
lons, lats = np.meshgrid(lons, lats)
else:
if not lons.shape == lats.shape == uin.shape == vin.shape:
raise TypeError("shapes of lons,lats and uin,vin don't match")
x, y = self(lons, lats)
# rotate from geographic to map coordinates.
if ma.isMaskedArray(uin):
mask = ma.getmaskarray(uin)
masked = True
uin = uin.filled(1)
vin = vin.filled(1)
else:
masked = False
# Map the (lon, lat) vector in the complex plane.
uvc = uin + 1j*vin
uvmag = np.abs(uvc)
theta = np.angle(uvc)
# Define a displacement (dlon, dlat) that moves all
# positions (lons, lats) a small distance in the
# direction of the original vector.
dc = 1E-5 * np.exp(theta*1j)
dlat = dc.imag * np.cos(np.radians(lats))
dlon = dc.real
# Deal with displacements that overshoot the North or South Pole.
farnorth = np.abs(lats+dlat) >= 90.0
somenorth = farnorth.any()
if somenorth:
dlon[farnorth] *= -1.0
dlat[farnorth] *= -1.0
# Add displacement to original location and find the native coordinates.
lon1 = lons + dlon
lat1 = lats + dlat
xn, yn = self(lon1, lat1)
# Determine the angle of the displacement in the native coordinates.
vecangle = np.arctan2(yn-y, xn-x)
if somenorth:
vecangle[farnorth] += np.pi
# Compute the x-y components of the original vector.
uvcout = uvmag * np.exp(1j*vecangle)
uout = uvcout.real
vout = uvcout.imag
if masked:
uout = ma.array(uout, mask=mask)
vout = ma.array(vout, mask=mask)
if returnxy:
return uout,vout,x,y
else:
return uout,vout
def set_axes_limits(self,ax=None):
"""
Final step in Basemap method wrappers of Axes plotting methods:
Set axis limits, fix aspect ratio for map domain using current
or specified axes instance. This is done only once per axes
instance.
In interactive mode, this method always calls draw_if_interactive
before returning.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# If we have already set the axes limits, and if the user
# has not defeated this by turning autoscaling back on,
# then all we need to do is plot if interactive.
if (hash(ax) in self._initialized_axes
and not ax.get_autoscalex_on()
and not ax.get_autoscaley_on()):
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
return
self._initialized_axes.add(hash(ax))
# Take control of axis scaling:
ax.set_autoscale_on(False)
# update data limits for map domain.
corners = ((self.llcrnrx,self.llcrnry), (self.urcrnrx,self.urcrnry))
ax.update_datalim( corners )
ax.set_xlim((self.llcrnrx, self.urcrnrx))
ax.set_ylim((self.llcrnry, self.urcrnry))
# if map boundary not yet drawn for elliptical maps, draw it with default values.
if not self._mapboundarydrawn or self._mapboundarydrawn not in ax.patches:
# elliptical map, draw boundary manually.
if (self.projection in ['ortho','geos','nsper','aeqd'] and
self._fulldisk) or self.round or self.projection in _pseudocyl:
# first draw boundary, no fill
limb1 = self.drawmapboundary(fill_color='none')
# draw another filled patch, with no boundary.
limb2 = self.drawmapboundary(linewidth=0)
self._mapboundarydrawn = limb2
# for elliptical map, always turn off axis_frame.
if (self.projection in ['ortho','geos','nsper','aeqd'] and
self._fulldisk) or self.round or self.projection in _pseudocyl:
# turn off axes frame.
ax.set_frame_on(False)
# make sure aspect ratio of map preserved.
# plot is re-centered in bounding rectangle.
# (anchor instance var determines where plot is placed)
if self.fix_aspect:
ax.set_aspect('equal',anchor=self.anchor)
else:
ax.set_aspect('auto',anchor=self.anchor)
# make sure axis ticks are turned off.
if self.noticks:
ax.set_xticks([])
ax.set_yticks([])
# force draw if in interactive mode.
if is_interactive():
import matplotlib.pyplot as plt
plt.draw_if_interactive()
@_transform1d
def scatter(self, *args, **kwargs):
"""
Plot points with markers on the map
(see matplotlib.pyplot.scatter documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axes instance.
Other \**kwargs passed on to matplotlib.pyplot.scatter.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.scatter(*args, **kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
@_transform1d
def plot(self, *args, **kwargs):
"""
Draw lines and/or markers on the map
(see matplotlib.pyplot.plot documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.plot.
"""
ax = kwargs.pop('ax', None) or self._check_ax()
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.plot(*args, **kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
def imshow(self, *args, **kwargs):
"""
Display an image over the map
(see matplotlib.pyplot.imshow documentation).
``extent`` and ``origin`` keywords set automatically so image
will be drawn over map region.
Extra keyword ``ax`` can be used to override the default axis instance.
        Other \**kwargs passed on to matplotlib.pyplot.imshow.
        returns a matplotlib.image.AxesImage instance.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
kwargs['extent']=(self.llcrnrx,self.urcrnrx,self.llcrnry,self.urcrnry)
# use origin='lower', unless overridden.
if 'origin' not in kwargs:
kwargs['origin']='lower'
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.imshow(*args, **kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# clip image for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
@_transform
def pcolor(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolor documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20)
        they will be converted to masked arrays with those values masked.
As a result, those values will not be plotted.
If ``tri`` is set to ``True``, an unstructured grid is assumed
        (x,y,data must be 1-d) and matplotlib.pyplot.tripcolor is used.
Extra keyword ``ax`` can be used to override the default axis instance.
        Other \**kwargs passed on to matplotlib.pyplot.pcolor (or tripcolor if
``tri=True``).
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
ret = ax.tripcolor(triang,data,**kwargs)
else:
ret = ax.tripcolor(x,y,data,**kwargs)
else:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.pcolor(x,y,data,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
@_transform
def pcolormesh(self,x,y,data,**kwargs):
"""
Make a pseudo-color plot over the map
(see matplotlib.pyplot.pcolormesh documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.pcolormesh.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.pcolormesh(x,y,data,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
if self.round:
# for some reason, frame gets turned on.
ax.set_frame_on(False)
return ret
def hexbin(self,x,y,**kwargs):
"""
Make a hexagonal binning plot of x versus y, where x, y are 1-D
sequences of the same length, N. If C is None (the default), this is a
        histogram of the number of occurrences of the observations at
(x[i],y[i]).
If C is specified, it specifies values at the coordinate (x[i],y[i]).
These values are accumulated for each hexagonal bin and then reduced
according to reduce_C_function, which defaults to the numpy mean function
(np.mean). (If C is specified, it must also be a 1-D sequence of the
same length as x and y.)
x, y and/or C may be masked arrays, in which case only unmasked points
will be plotted.
(see matplotlib.pyplot.hexbin documentation).
Extra keyword ``ax`` can be used to override the default axis instance.
Other \**kwargs passed on to matplotlib.pyplot.hexbin
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
# make x,y masked arrays
# (masked where data is outside of projection limb)
x = ma.masked_values(np.where(x > 1.e20,1.e20,x), 1.e20)
y = ma.masked_values(np.where(y > 1.e20,1.e20,y), 1.e20)
ret = ax.hexbin(x,y,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt:
plt.sci(ret)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
@_transform
def contour(self,x,y,data,*args,**kwargs):
"""
Make a contour plot over the map
(see matplotlib.pyplot.contour documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontour is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contour
(or tricontour if ``tri=True``).
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
if kwargs.pop('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
                mask = np.logical_and(np.logical_and(x>=self.xmin,x<=self.xmax),\
                       np.logical_and(y>=self.ymin,y<=self.ymax))
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontour(triang,data,*args,**kwargs)
else:
CS = ax.tricontour(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
                    xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
                        WARNING: x coordinate not monotonically increasing - contour plot
                        may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftdata
method to adjust the data to be consistent with the map projection
region (see examples/shiftdata.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contour(x,y,data,*args,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# clip for round polar plots.
if self.round: CS.collections,c = self._clipcircle(ax,CS.collections)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return CS
@_transform
def contourf(self,x,y,data,*args,**kwargs):
"""
Make a filled contour plot over the map
(see matplotlib.pyplot.contourf documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
If x or y are outside projection limb (i.e. they have values > 1.e20),
        the corresponding data elements will be masked.
Extra keyword 'ax' can be used to override the default axis instance.
If ``tri`` is set to ``True``, an unstructured grid is assumed
(x,y,data must be 1-d) and matplotlib.pyplot.tricontourf is used.
Other \*args and \**kwargs passed on to matplotlib.pyplot.contourf
(or tricontourf if ``tri=True``).
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
if kwargs.get('tri', False):
try:
import matplotlib.tri as tri
except:
msg='need matplotlib > 0.99.1 to plot on unstructured grids'
raise ImportError(msg)
# for unstructured grids, toss out points outside
# projection limb (don't use those points in triangulation).
if ma.isMA(data):
data = data.filled(fill_value=1.e30)
masked=True
else:
masked=False
mask = np.logical_or(x<1.e20,y<1.e20)
x = np.compress(mask,x)
y = np.compress(mask,y)
data = np.compress(mask,data)
if masked:
triang = tri.Triangulation(x, y)
z = data[triang.triangles]
mask = (z > 1.e20).sum(axis=-1)
triang.set_mask(mask)
CS = ax.tricontourf(triang,data,*args,**kwargs)
else:
CS = ax.tricontourf(x,y,data,*args,**kwargs)
else:
# make sure x is monotonically increasing - if not,
# print warning suggesting that the data be shifted in longitude
# with the shiftgrid function.
# only do this check for global projections.
if self.projection in _cylproj + _pseudocyl:
                    xx = x[x.shape[0]//2,:]
condition = (xx >= self.xmin) & (xx <= self.xmax)
xl = xx.compress(condition).tolist()
xs = xl[:]
xs.sort()
if xl != xs:
sys.stdout.write(dedent("""
                        WARNING: x coordinate not monotonically increasing - contour plot
                        may not be what you expect. If it looks odd, you can either
adjust the map projection region to be consistent with your data, or
(if your data is on a global lat/lon grid) use the shiftgrid
function to adjust the data to be consistent with the map projection
region (see examples/contour_demo.py)."""))
# mask for points more than one grid length outside projection limb.
xx = ma.masked_where(x > 1.e20, x)
yy = ma.masked_where(y > 1.e20, y)
if self.projection != 'omerc':
epsx = np.abs(xx[:,1:]-xx[:,0:-1]).max()
epsy = np.abs(yy[1:,:]-yy[0:-1,:]).max()
else: # doesn't work for omerc (FIXME)
epsx = 0.; epsy = 0
xymask = \
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
xymask = xymask + \
np.logical_or(np.less(x,self.xmin-epsx),np.less(y,self.ymin-epsy))
data = ma.asarray(data)
# combine with data mask.
mask = np.logical_or(ma.getmaskarray(data),xymask)
data = ma.masked_array(data,mask=mask)
CS = ax.contourf(x,y,data,*args,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# reset current active image (only if pyplot is imported).
if plt and CS.get_array() is not None:
plt.sci(CS)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
# clip for round polar plots.
if self.round: CS.collections,c = self._clipcircle(ax,CS.collections)
return CS
@_transformuv
def quiver(self, x, y, u, v, *args, **kwargs):
"""
Make a vector plot (u, v) with arrows on the map.
Grid must be evenly spaced regular grid in x and y.
(see matplotlib.pyplot.quiver documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.quiver.
"""
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.quiver(x,y,u,v,*args,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
if plt is not None and ret.get_array() is not None:
plt.sci(ret)
# clip for round polar plots.
if self.round: ret,c = self._clipcircle(ax,ret)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
@_transformuv
def streamplot(self, x, y, u, v, *args, **kwargs):
"""
Draws streamlines of a vector flow.
(see matplotlib.pyplot.streamplot documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.streamplot.
"""
if _matplotlib_version < '1.2':
msg = dedent("""
streamplot method requires matplotlib 1.2 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
try:
ret = ax.streamplot(x,y,u,v,*args,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
if plt is not None and ret.lines.get_array() is not None:
plt.sci(ret.lines)
# clip for round polar plots.
# streamplot arrows not returned in matplotlib 1.1.1, so clip all
# FancyArrow patches attached to axes instance.
        if self.round:
ret,c = self._clipcircle(ax,ret)
for p in ax.patches:
if isinstance(p,FancyArrowPatch): p.set_clip_path(c)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return ret
@_transformuv
def barbs(self, x, y, u, v, *args, **kwargs):
"""
        Make a wind barb plot (u, v) on the map.
(see matplotlib.pyplot.barbs documentation).
        If ``latlon`` keyword is set to True, x,y are interpreted as
longitude and latitude in degrees. Data and longitudes are
automatically shifted to match map projection region for cylindrical
and pseudocylindrical projections, and x,y are transformed to map
projection coordinates. If ``latlon`` is False (default), x and y
are assumed to be map projection coordinates.
Extra keyword ``ax`` can be used to override the default axis instance.
Other \*args and \**kwargs passed on to matplotlib.pyplot.barbs
        Returns two matplotlib.quiver.Barbs instances, one for the Northern
Hemisphere and one for the Southern Hemisphere.
"""
if _matplotlib_version < '0.98.3':
msg = dedent("""
barb method requires matplotlib 0.98.3 or higher,
you have %s""" % _matplotlib_version)
raise NotImplementedError(msg)
ax, plt = self._ax_plt_from_kw(kwargs)
# allow callers to override the hold state by passing hold=True|False
b = ax.ishold()
h = kwargs.pop('hold',None)
if h is not None:
ax.hold(h)
lons, lats = self(x, y, inverse=True)
unh = ma.masked_where(lats <= 0, u)
vnh = ma.masked_where(lats <= 0, v)
ush = ma.masked_where(lats > 0, u)
vsh = ma.masked_where(lats > 0, v)
try:
retnh = ax.barbs(x,y,unh,vnh,*args,**kwargs)
kwargs['flip_barb']=True
retsh = ax.barbs(x,y,ush,vsh,*args,**kwargs)
except:
ax.hold(b)
raise
ax.hold(b)
# Because there are two collections returned in general,
# we can't set the current image...
#if plt is not None and ret.get_array() is not None:
# plt.sci(retnh)
# clip for round polar plots.
if self.round:
retnh,c = self._clipcircle(ax,retnh)
retsh,c = self._clipcircle(ax,retsh)
# set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return retnh,retsh
def drawlsmask(self,land_color="0.8",ocean_color="w",lsmask=None,
lsmask_lons=None,lsmask_lats=None,lakes=True,resolution='l',grid=5,**kwargs):
"""
Draw land-sea mask image.
.. note::
The land-sea mask image cannot be overlaid on top
of other images, due to limitations in matplotlib image handling
(you can't specify the zorder of an image).
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
land_color desired land color (color name or rgba tuple).
Default gray ("0.8").
ocean_color desired water color (color name or rgba tuple).
Default white.
lsmask An array of 0's for ocean pixels, 1's for
land pixels and 2's for lake/pond pixels.
Default is None
(default 5-minute resolution land-sea mask is used).
lakes Plot lakes and ponds (Default True)
lsmask_lons 1d array of longitudes for lsmask (ignored
if lsmask is None). Longitudes must be ordered
from -180 W eastward.
lsmask_lats 1d array of latitudes for lsmask (ignored
if lsmask is None). Latitudes must be ordered
from -90 S northward.
resolution gshhs coastline resolution used to define land/sea
mask (default 'l', available 'c','l','i','h' or 'f')
grid land/sea mask grid spacing in minutes (Default 5;
10, 2.5 and 1.25 are also available).
\**kwargs extra keyword arguments passed on to
:meth:`imshow`
============== ====================================================
If any of the lsmask, lsmask_lons or lsmask_lats keywords are not
set, the built in GSHHS land-sea mask datasets are used.
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.image.AxesImage instance.
"""
# convert land and water colors to integer rgba tuples with
# values between 0 and 255.
from matplotlib.colors import ColorConverter
c = ColorConverter()
# if conversion fails, assume it's because the color
# given is already an rgba tuple with values between 0 and 255.
try:
cl = c.to_rgba(land_color)
rgba_land = tuple([int(255*x) for x in cl])
except:
rgba_land = land_color
try:
co = c.to_rgba(ocean_color)
rgba_ocean = tuple([int(255*x) for x in co])
except:
rgba_ocean = ocean_color
# look for axes instance (as keyword, an instance variable
        # or from plt.gca()).
ax = kwargs.pop('ax', None) or self._check_ax()
# if lsmask,lsmask_lons,lsmask_lats keywords not given,
# read default land-sea mask in from file.
if lsmask is None or lsmask_lons is None or lsmask_lats is None:
# if lsmask instance variable already set, data already
# read in.
if self.lsmask is None:
# read in land/sea mask.
lsmask_lons, lsmask_lats, lsmask =\
_readlsmask(lakes=lakes,resolution=resolution,grid=grid)
# instance variable lsmask is set on first invocation,
# it contains the land-sea mask interpolated to the native
# projection grid. Further calls to drawlsmask will not
# redo the interpolation (unless a new land-sea mask is passed
# in via the lsmask, lsmask_lons, lsmask_lats keywords).
# is it a cylindrical projection whose limits lie
# outside the limits of the image?
cylproj = self.projection in _cylproj and \
(self.urcrnrlon > lsmask_lons[-1] or \
self.llcrnrlon < lsmask_lons[0])
if cylproj:
                    # stack grids side-by-side (in longitudinal direction), so
# any range of longitudes may be plotted on a world map.
lsmask_lons = \
np.concatenate((lsmask_lons,lsmask_lons[1:]+360),1)
lsmask = \
np.concatenate((lsmask,lsmask[:,1:]),1)
else:
if lakes: lsmask = np.where(lsmask==2,np.array(0,np.uint8),lsmask)
# transform mask to nx x ny regularly spaced native projection grid
# nx and ny chosen to have roughly the same horizontal
# resolution as mask.
if self.lsmask is None:
nlons = len(lsmask_lons)
nlats = len(lsmask_lats)
if self.projection == 'cyl':
dx = lsmask_lons[1]-lsmask_lons[0]
else:
dx = (np.pi/180.)*(lsmask_lons[1]-lsmask_lons[0])*self.rmajor
nx = int((self.xmax-self.xmin)/dx)+1; ny = int((self.ymax-self.ymin)/dx)+1
# interpolate rgba values from proj='cyl' (geographic coords)
# to a rectangular map projection grid.
mask,x,y = self.transform_scalar(lsmask,lsmask_lons,\
lsmask_lats,nx,ny,returnxy=True,order=0,masked=255)
# for these projections, points outside the projection
# limb have to be set to transparent manually.
if self.projection in _pseudocyl:
lons, lats = self(x, y, inverse=True)
lon_0 = self.projparams['lon_0']
                lats = lats[:,nx//2]
lons1 = (lon_0+180.)*np.ones(lons.shape[0],np.float64)
lons2 = (lon_0-180.)*np.ones(lons.shape[0],np.float64)
xmax,ytmp = self(lons1,lats)
xmin,ytmp = self(lons2,lats)
for j in range(lats.shape[0]):
xx = x[j,:]
mask[j,:]=np.where(np.logical_or(xx<xmin[j],xx>xmax[j]),\
255,mask[j,:])
self.lsmask = mask
ny, nx = self.lsmask.shape
rgba = np.ones((ny,nx,4),np.uint8)
rgba_land = np.array(rgba_land,np.uint8)
rgba_ocean = np.array(rgba_ocean,np.uint8)
for k in range(4):
rgba[:,:,k] = np.where(self.lsmask,rgba_land[k],rgba_ocean[k])
# make points outside projection limb transparent.
rgba[:,:,3] = np.where(self.lsmask==255,0,rgba[:,:,3])
# plot mask as rgba image.
im = self.imshow(rgba,interpolation='nearest',ax=ax,**kwargs)
# clip for round polar plots.
if self.round: im,c = self._clipcircle(ax,im)
return im
def bluemarble(self,ax=None,scale=None,**kwargs):
"""
display blue marble image (from http://visibleearth.nasa.gov)
as map background.
Default image size is 5400x2700, which can be quite slow and
use quite a bit of memory. The ``scale`` keyword can be used
to downsample the image (``scale=0.5`` downsamples to 2700x1350).
\**kwargs passed on to :meth:`imshow`.
returns a matplotlib.image.AxesImage instance.
"""
if ax is not None:
return self.warpimage(image='bluemarble',ax=ax,scale=scale,**kwargs)
else:
return self.warpimage(image='bluemarble',scale=scale,**kwargs)
def shadedrelief(self,ax=None,scale=None,**kwargs):
"""
display shaded relief image (from http://www.shadedrelief.com)
as map background.
Default image size is 10800x5400, which can be quite slow and
use quite a bit of memory. The ``scale`` keyword can be used
to downsample the image (``scale=0.5`` downsamples to 5400x2700).
\**kwargs passed on to :meth:`imshow`.
returns a matplotlib.image.AxesImage instance.
"""
if ax is not None:
return self.warpimage(image='shadedrelief',ax=ax,scale=scale,**kwargs)
else:
return self.warpimage(image='shadedrelief',scale=scale,**kwargs)
def etopo(self,ax=None,scale=None,**kwargs):
"""
display etopo relief image (from
http://www.ngdc.noaa.gov/mgg/global/global.html)
as map background.
Default image size is 5400x2700, which can be quite slow and
use quite a bit of memory. The ``scale`` keyword can be used
        to downsample the image (``scale=0.5`` downsamples to 2700x1350).
\**kwargs passed on to :meth:`imshow`.
returns a matplotlib.image.AxesImage instance.
"""
if ax is not None:
return self.warpimage(image='etopo',ax=ax,scale=scale,**kwargs)
else:
return self.warpimage(image='etopo',scale=scale,**kwargs)
def warpimage(self,image="bluemarble",scale=None,**kwargs):
"""
Display an image (filename given by ``image`` keyword) as a map background.
If image is a URL (starts with 'http'), it is downloaded to a temp
file using urllib.urlretrieve.
Default (if ``image`` not specified) is to display
'blue marble next generation' image from http://visibleearth.nasa.gov/.
Specified image must have pixels covering the whole globe in a regular
        lat/lon grid, starting at -180W and the South Pole.
Works with the global images from
http://earthobservatory.nasa.gov/Features/BlueMarble/BlueMarble_monthlies.php.
The ``scale`` keyword can be used to downsample (rescale) the image.
Values less than 1.0 will speed things up at the expense of image
resolution.
Extra keyword ``ax`` can be used to override the default axis instance.
\**kwargs passed on to :meth:`imshow`.
returns a matplotlib.image.AxesImage instance.
"""
try:
from PIL import Image
except ImportError:
raise ImportError('warpimage method requires PIL (http://www.pythonware.com/products/pil)')
from matplotlib.image import pil_to_array
if self.celestial:
msg='warpimage does not work in celestial coordinates'
raise ValueError(msg)
ax = kwargs.pop('ax', None) or self._check_ax()
# default image file is blue marble next generation
# from NASA (http://visibleearth.nasa.gov).
if image == "bluemarble":
file = os.path.join(basemap_datadir,'bmng.jpg')
# display shaded relief image (from
# http://www.shadedreliefdata.com)
elif image == "shadedrelief":
file = os.path.join(basemap_datadir,'shadedrelief.jpg')
# display etopo image (from
# http://www.ngdc.noaa.gov/mgg/image/globalimages.html)
elif image == "etopo":
file = os.path.join(basemap_datadir,'etopo1.jpg')
else:
file = image
        # if image is same as previous invocation, use cached data.
# if not, regenerate rgba data.
if not hasattr(self,'_bm_file') or self._bm_file != file:
newfile = True
else:
newfile = False
if file.startswith('http'):
from urllib import urlretrieve
self._bm_file, headers = urlretrieve(file)
else:
self._bm_file = file
# bmproj is True if map projection region is same as
# image region.
bmproj = self.projection == 'cyl' and \
self.llcrnrlon == -180 and self.urcrnrlon == 180 and \
self.llcrnrlat == -90 and self.urcrnrlat == 90
# read in jpeg image to rgba array of normalized floats.
if not hasattr(self,'_bm_rgba') or newfile:
pilImage = Image.open(self._bm_file)
if scale is not None:
w, h = pilImage.size
width = int(np.round(w*scale))
height = int(np.round(h*scale))
pilImage = pilImage.resize((width,height),Image.ANTIALIAS)
if _matplotlib_version >= '1.2':
# orientation of arrays returned by pil_to_array
# changed (https://github.com/matplotlib/matplotlib/pull/616)
self._bm_rgba = pil_to_array(pilImage)[::-1,:]
else:
self._bm_rgba = pil_to_array(pilImage)
# define lat/lon grid that image spans.
nlons = self._bm_rgba.shape[1]; nlats = self._bm_rgba.shape[0]
delta = 360./float(nlons)
self._bm_lons = np.arange(-180.+0.5*delta,180.,delta)
self._bm_lats = np.arange(-90.+0.5*delta,90.,delta)
# is it a cylindrical projection whose limits lie
# outside the limits of the image?
cylproj = self.projection in _cylproj and \
(self.urcrnrlon > self._bm_lons[-1] or \
self.llcrnrlon < self._bm_lons[0])
# if pil_to_array returns a 2D array, it's a grayscale image.
# create an RGB image, with R==G==B.
if self._bm_rgba.ndim == 2:
tmp = np.empty(self._bm_rgba.shape+(3,),np.uint8)
for k in range(3):
tmp[:,:,k] = self._bm_rgba
self._bm_rgba = tmp
if cylproj and not bmproj:
                # stack grids side-by-side (in longitudinal direction), so
# any range of longitudes may be plotted on a world map.
self._bm_lons = \
np.concatenate((self._bm_lons,self._bm_lons+360),1)
self._bm_rgba = \
np.concatenate((self._bm_rgba,self._bm_rgba),1)
# convert to normalized floats.
self._bm_rgba = self._bm_rgba.astype(np.float32)/255.
if not bmproj: # interpolation necessary.
if newfile or not hasattr(self,'_bm_rgba_warped'):
# transform to nx x ny regularly spaced native
# projection grid.
# nx and ny chosen to have roughly the
# same horizontal res as original image.
if self.projection != 'cyl':
dx = 2.*np.pi*self.rmajor/float(nlons)
nx = int((self.xmax-self.xmin)/dx)+1
ny = int((self.ymax-self.ymin)/dx)+1
else:
dx = 360./float(nlons)
nx = int((self.urcrnrlon-self.llcrnrlon)/dx)+1
ny = int((self.urcrnrlat-self.llcrnrlat)/dx)+1
self._bm_rgba_warped = np.ones((ny,nx,4),np.float64)
# interpolate rgba values from geographic coords (proj='cyl')
# to map projection coords.
# if masked=True, values outside of
# projection limb will be masked.
for k in range(3):
self._bm_rgba_warped[:,:,k],x,y = \
self.transform_scalar(self._bm_rgba[:,:,k],\
self._bm_lons,self._bm_lats,nx,ny,returnxy=True)
# for ortho,geos mask pixels outside projection limb.
if self.projection in ['geos','ortho','nsper'] or \
(self.projection == 'aeqd' and self._fulldisk):
lonsr,latsr = self(x,y,inverse=True)
mask = ma.zeros((ny,nx,4),np.int8)
mask[:,:,0] = np.logical_or(lonsr>1.e20,latsr>1.e30)
for k in range(1,4):
mask[:,:,k] = mask[:,:,0]
self._bm_rgba_warped = \
ma.masked_array(self._bm_rgba_warped,mask=mask)
# make points outside projection limb transparent.
self._bm_rgba_warped = self._bm_rgba_warped.filled(0.)
# treat pseudo-cyl projections such as mollweide, robinson and sinusoidal.
elif self.projection in _pseudocyl and \
self.projection != 'hammer':
lonsr,latsr = self(x,y,inverse=True)
mask = ma.zeros((ny,nx,4),np.int8)
lon_0 = self.projparams['lon_0']
lonright = lon_0+180.
lonleft = lon_0-180.
x1 = np.array(ny*[0.5*(self.xmax + self.xmin)],np.float)
y1 = np.linspace(self.ymin, self.ymax, ny)
lons1, lats1 = self(x1,y1,inverse=True)
lats1 = np.where(lats1 < -89.999999, -89.999999, lats1)
lats1 = np.where(lats1 > 89.999999, 89.999999, lats1)
for j,lat in enumerate(lats1):
xmax,ymax = self(lonright,lat)
xmin,ymin = self(lonleft,lat)
mask[j,:,0] = np.logical_or(x[j,:]>xmax,x[j,:]<xmin)
for k in range(1,4):
mask[:,:,k] = mask[:,:,0]
self._bm_rgba_warped = \
ma.masked_array(self._bm_rgba_warped,mask=mask)
# make points outside projection limb transparent.
self._bm_rgba_warped = self._bm_rgba_warped.filled(0.)
# plot warped rgba image.
im = self.imshow(self._bm_rgba_warped,ax=ax,**kwargs)
# for hammer projection, use clip path defined by
# projection limb (patch created in drawmapboundary).
if self.projection == 'hammer':
if not self._mapboundarydrawn:
self.drawmapboundary(color='none',linewidth=None)
im.set_clip_path(self._mapboundarydrawn)
else:
# bmproj True, no interpolation necessary.
im = self.imshow(self._bm_rgba,ax=ax,**kwargs)
# clip for round polar plots.
if self.round: im,c = self._clipcircle(ax,im)
return im
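    # Illustrative usage sketch (hypothetical file name and URL): warpimage
    # also accepts any global plate carree image, local or remote, e.g.
    #   >>> m.warpimage(image='my_global_image.jpg')
    #   >>> m.warpimage(image='http://example.com/global_image.png', scale=0.5)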
def arcgisimage(self,server='http://server.arcgisonline.com/ArcGIS',\
service='ESRI_Imagery_World_2D',xpixels=400,ypixels=None,\
dpi=96,verbose=False,**kwargs):
"""
Retrieve an image using the ArcGIS Server REST API and display it on
the map. In order to use this method, the Basemap instance must be
created using the ``epsg`` keyword to define the map projection, unless
the ``cyl`` projection is used (in which case the epsg code 4326 is
assumed).
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
server web map server URL (default
http://server.arcgisonline.com/ArcGIS).
service service (image type) hosted on server (default
ESRI_Imagery_World_2D, which is NASA 'Blue Marble'
image).
xpixels requested number of image pixels in x-direction
(default 400).
ypixels requested number of image pixels in y-direction.
Default (None) is to infer the number from
from xpixels and the aspect ratio of the
map projection region.
dpi The device resolution of the exported image (dots per
inch, default 96).
verbose if True, print URL used to retrieve image (default
False).
============== ====================================================
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.image.AxesImage instance.
"""
import urllib2
if not hasattr(self,'epsg'):
msg = dedent("""
            Basemap instance must be created using an EPSG code
            (http://spatialreference.org) in order to use the arcgisimage method""")
raise ValueError(msg)
# find the x,y values at the corner points.
p = pyproj.Proj(init="epsg:%s" % self.epsg, preserve_units=True)
xmin,ymin = p(self.llcrnrlon,self.llcrnrlat)
xmax,ymax = p(self.urcrnrlon,self.urcrnrlat)
if self.projection in _cylproj:
Dateline =\
_geoslib.Point(self(180.,0.5*(self.llcrnrlat+self.urcrnrlat)))
hasDateline = Dateline.within(self._boundarypolyxy)
if hasDateline:
msg=dedent("""
arcgisimage cannot handle images that cross
the dateline for cylindrical projections.""")
raise ValueError(msg)
if self.projection == 'cyl':
xmin = (180./np.pi)*xmin; xmax = (180./np.pi)*xmax
ymin = (180./np.pi)*ymin; ymax = (180./np.pi)*ymax
# ypixels not given, find by scaling xpixels by the map aspect ratio.
if ypixels is None:
ypixels = int(self.aspect*xpixels)
# construct a URL using the ArcGIS Server REST API.
basemap_url = \
"%s/rest/services/%s/MapServer/export?\
bbox=%s,%s,%s,%s&\
bboxSR=%s&\
imageSR=%s&\
size=%s,%s&\
dpi=%s&\
format=png32&\
f=image" %\
(server,service,xmin,ymin,xmax,ymax,self.epsg,self.epsg,xpixels,ypixels,dpi)
# print URL?
if verbose: print basemap_url
# return AxesImage instance.
return self.imshow(imread(urllib2.urlopen(basemap_url)),origin='upper')
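    # Illustrative usage sketch (hypothetical region; epsg=4326 is plain
    # lat/lon): the Basemap instance must be built with the ``epsg`` keyword,
    # e.g.
    #   >>> m = Basemap(llcrnrlon=-10., llcrnrlat=35., urcrnrlon=5.,
    #   ...             urcrnrlat=44., epsg=4326)
    #   >>> m.arcgisimage(service='ESRI_Imagery_World_2D', xpixels=1000,
    #   ...               verbose=True)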
def wmsimage(self,server,\
xpixels=400,ypixels=None,\
format='png',verbose=False,**kwargs):
"""
Retrieve an image using from a WMS server using the
Open Geospatial Consortium (OGC) standard interface
and display on the map. Requires OWSLib
(http://pypi.python.org/pypi/OWSLib).
In order to use this method, the Basemap instance must be
created using the ``epsg`` keyword to define the map projection, unless
the ``cyl`` projection is used (in which case the epsg code 4326 is
assumed).
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
server WMS server URL.
xpixels requested number of image pixels in x-direction
(default 400).
ypixels requested number of image pixels in y-direction.
Default (None) is to infer the number from
from xpixels and the aspect ratio of the
map projection region.
format desired image format (default 'png')
verbose if True, print WMS server info (default
False).
\**kwargs extra keyword arguments passed on to
OWSLib.wms.WebMapService.getmap.
============== ====================================================
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.image.AxesImage instance.
"""
try:
from owslib.wms import WebMapService
except ImportError:
raise ImportError('OWSLib required to use wmsimage method')
import urllib2, io
if not hasattr(self,'epsg'):
msg = dedent("""
            Basemap instance must be created using an EPSG code
            (http://spatialreference.org) in order to use the wmsimage method""")
raise ValueError(msg)
if 'layers' not in kwargs:
raise ValueError('no layers specified')
# find the x,y values at the corner points.
p = pyproj.Proj(init="epsg:%s" % self.epsg, preserve_units=True)
xmin,ymin = p(self.llcrnrlon,self.llcrnrlat)
xmax,ymax = p(self.urcrnrlon,self.urcrnrlat)
if self.projection in _cylproj:
Dateline =\
_geoslib.Point(self(180.,0.5*(self.llcrnrlat+self.urcrnrlat)))
hasDateline = Dateline.within(self._boundarypolyxy)
if hasDateline:
msg=dedent("""
wmsimage cannot handle images that cross
the dateline for cylindrical projections.""")
raise ValueError(msg)
if self.projection == 'cyl':
xmin = (180./np.pi)*xmin; xmax = (180./np.pi)*xmax
ymin = (180./np.pi)*ymin; ymax = (180./np.pi)*ymax
# ypixels not given, find by scaling xpixels by the map aspect ratio.
if ypixels is None:
ypixels = int(self.aspect*xpixels)
if verbose: print server
wms = WebMapService(server)
if verbose:
print 'id: %s, version: %s' %\
(wms.identification.type,wms.identification.version)
print 'title: %s, abstract: %s' %\
(wms.identification.title,wms.identification.abstract)
print 'available layers:'
print list(wms.contents)
print 'projection options:'
print wms[kwargs['layers'][0]].crsOptions
# remove keys from kwargs that are over-ridden
for k in ['format','bbox','service','size','srs']:
if 'format' in kwargs: del kwargs['format']
img = wms.getmap(service='wms',bbox=(xmin,ymin,xmax,ymax),
size=(xpixels,ypixels),format='image/%s'%format,
srs='EPSG:%s' % self.epsg, **kwargs)
# return AxesImage instance.
# this works for png and jpeg.
return self.imshow(imread(io.BytesIO(urllib2.urlopen(img.url).read()),
format=format),origin='upper')
# this works for png, but not jpeg
#return self.imshow(imread(urllib2.urlopen(img.url),format=format),origin='upper')
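    # Illustrative usage sketch (hypothetical WMS server URL and layer name):
    #   >>> m.wmsimage('http://wms.example.com/wms?', layers=['basic'],
    #   ...            xpixels=800, format='png', verbose=True)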
def drawmapscale(self,lon,lat,lon0,lat0,length,barstyle='simple',\
units='km',fontsize=9,yoffset=None,labelstyle='simple',\
fontcolor='k',fillcolor1='w',fillcolor2='k',ax=None,\
format='%d',zorder=None):
"""
Draw a map scale at ``lon,lat`` of length ``length``
representing distance in the map
projection coordinates at ``lon0,lat0``.
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
units the units of the length argument (Default km).
barstyle ``simple`` or ``fancy`` (roughly corresponding
to the styles provided by Generic Mapping Tools).
Default ``simple``.
fontsize for map scale annotations, default 9.
color for map scale annotations, default black.
        labelstyle     ``simple`` (default) or ``fancy``. For
                       ``fancy`` the map scale factor (ratio between
                       the actual distance and map projection distance
                       at lon0,lat0) and the value of lon0,lat0 are also
                       displayed on the top of the scale bar. For
                       ``simple``, just the units are displayed on top
and the distance below the scale bar.
If equal to False, plot an empty label.
format a string formatter to format numeric values
yoffset yoffset controls how tall the scale bar is,
and how far the annotations are offset from the
scale bar. Default is 0.02 times the height of
the map (0.02*(self.ymax-self.ymin)).
fillcolor1(2) colors of the alternating filled regions
(default white and black). Only relevant for
'fancy' barstyle.
zorder sets the zorder for the map scale.
============== ====================================================
Extra keyword ``ax`` can be used to override the default axis instance.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# not valid for cylindrical projection
if self.projection == 'cyl':
raise ValueError("cannot draw map scale for projection='cyl'")
# convert length to meters
lenlab = length
if units == 'km':
length = length*1000
elif units == 'mi':
length = length*1609.344
elif units == 'nmi':
length = length*1852
elif units != 'm':
msg = "units must be 'm' (meters), 'km' (kilometers), "\
"'mi' (miles) or 'nmi' (nautical miles)"
raise KeyError(msg)
# reference point and center of scale.
x0,y0 = self(lon0,lat0)
xc,yc = self(lon,lat)
# make sure lon_0 between -180 and 180
lon_0 = ((lon0+360) % 360) - 360
if lat0>0:
if lon>0:
lonlatstr = u'%g\N{DEGREE SIGN}N, %g\N{DEGREE SIGN}E' % (lat0,lon_0)
elif lon<0:
lonlatstr = u'%g\N{DEGREE SIGN}N, %g\N{DEGREE SIGN}W' % (lat0,lon_0)
else:
lonlatstr = u'%g\N{DEGREE SIGN}, %g\N{DEGREE SIGN}W' % (lat0,lon_0)
else:
if lon>0:
lonlatstr = u'%g\N{DEGREE SIGN}S, %g\N{DEGREE SIGN}E' % (lat0,lon_0)
elif lon<0:
lonlatstr = u'%g\N{DEGREE SIGN}S, %g\N{DEGREE SIGN}W' % (lat0,lon_0)
else:
lonlatstr = u'%g\N{DEGREE SIGN}S, %g\N{DEGREE SIGN}' % (lat0,lon_0)
# left edge of scale
lon1,lat1 = self(x0-length/2,y0,inverse=True)
x1,y1 = self(lon1,lat1)
# right edge of scale
lon4,lat4 = self(x0+length/2,y0,inverse=True)
x4,y4 = self(lon4,lat4)
x1 = x1-x0+xc; y1 = y1-y0+yc
x4 = x4-x0+xc; y4 = y4-y0+yc
if x1 > 1.e20 or x4 > 1.e20 or y1 > 1.e20 or y4 > 1.e20:
raise ValueError("scale bar positioned outside projection limb")
# scale factor for true distance
gc = pyproj.Geod(a=self.rmajor,b=self.rminor)
az12,az21,dist = gc.inv(lon1,lat1,lon4,lat4)
scalefact = dist/length
# label to put on top of scale bar.
if labelstyle=='simple':
labelstr = units
elif labelstyle == 'fancy':
labelstr = units+" (scale factor %4.2f at %s)"%(scalefact,lonlatstr)
elif labelstyle == False:
labelstr = ''
else:
raise KeyError("labelstyle must be 'simple' or 'fancy'")
# default y offset is 2 percent of map height.
if yoffset is None: yoffset = 0.02*(self.ymax-self.ymin)
rets = [] # will hold all plot objects generated.
# 'fancy' style
if barstyle == 'fancy':
#we need 5 sets of x coordinates (in map units)
#quarter scale
lon2,lat2 = self(x0-length/4,y0,inverse=True)
x2,y2 = self(lon2,lat2)
x2 = x2-x0+xc; y2 = y2-y0+yc
#three quarter scale
lon3,lat3 = self(x0+length/4,y0,inverse=True)
x3,y3 = self(lon3,lat3)
x3 = x3-x0+xc; y3 = y3-y0+yc
#plot top line
ytop = yc+yoffset/2
ybottom = yc-yoffset/2
ytick = ybottom - yoffset/2
ytext = ytick - yoffset/2
rets.append(self.plot([x1,x4],[ytop,ytop],color=fontcolor)[0])
#plot bottom line
rets.append(self.plot([x1,x4],[ybottom,ybottom],color=fontcolor)[0])
#plot left edge
rets.append(self.plot([x1,x1],[ybottom,ytop],color=fontcolor)[0])
#plot right edge
rets.append(self.plot([x4,x4],[ybottom,ytop],color=fontcolor)[0])
#make a filled black box from left edge to 1/4 way across
rets.append(ax.fill([x1,x2,x2,x1,x1],[ytop,ytop,ybottom,ybottom,ytop],\
ec=fontcolor,fc=fillcolor1)[0])
#make a filled white box from 1/4 way across to 1/2 way across
rets.append(ax.fill([x2,xc,xc,x2,x2],[ytop,ytop,ybottom,ybottom,ytop],\
ec=fontcolor,fc=fillcolor2)[0])
#make a filled white box from 1/2 way across to 3/4 way across
rets.append(ax.fill([xc,x3,x3,xc,xc],[ytop,ytop,ybottom,ybottom,ytop],\
ec=fontcolor,fc=fillcolor1)[0])
#make a filled white box from 3/4 way across to end
rets.append(ax.fill([x3,x4,x4,x3,x3],[ytop,ytop,ybottom,ybottom,ytop],\
ec=fontcolor,fc=fillcolor2)[0])
#plot 3 tick marks at left edge, center, and right edge
rets.append(self.plot([x1,x1],[ytick,ybottom],color=fontcolor)[0])
rets.append(self.plot([xc,xc],[ytick,ybottom],color=fontcolor)[0])
rets.append(self.plot([x4,x4],[ytick,ybottom],color=fontcolor)[0])
#label 3 tick marks
rets.append(ax.text(x1,ytext,format % (0),\
horizontalalignment='center',\
verticalalignment='top',\
fontsize=fontsize,color=fontcolor))
rets.append(ax.text(xc,ytext,format % (0.5*lenlab),\
horizontalalignment='center',\
verticalalignment='top',\
fontsize=fontsize,color=fontcolor))
rets.append(ax.text(x4,ytext,format % (lenlab),\
horizontalalignment='center',\
verticalalignment='top',\
fontsize=fontsize,color=fontcolor))
#put units, scale factor on top
rets.append(ax.text(xc,ytop+yoffset/2,labelstr,\
horizontalalignment='center',\
verticalalignment='bottom',\
fontsize=fontsize,color=fontcolor))
# 'simple' style
elif barstyle == 'simple':
rets.append(self.plot([x1,x4],[yc,yc],color=fontcolor)[0])
rets.append(self.plot([x1,x1],[yc-yoffset,yc+yoffset],color=fontcolor)[0])
rets.append(self.plot([x4,x4],[yc-yoffset,yc+yoffset],color=fontcolor)[0])
rets.append(ax.text(xc,yc-yoffset,format % lenlab,\
verticalalignment='top',horizontalalignment='center',\
fontsize=fontsize,color=fontcolor))
#put units, scale factor on top
rets.append(ax.text(xc,yc+yoffset,labelstr,\
horizontalalignment='center',\
verticalalignment='bottom',\
fontsize=fontsize,color=fontcolor))
else:
raise KeyError("barstyle must be 'simple' or 'fancy'")
if zorder is not None:
for ret in rets:
try:
ret.set_zorder(zorder)
except:
pass
return rets
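    # Illustrative usage sketch (hypothetical coordinates): draw a 1000 km
    # 'fancy' scale bar at lon=-105, lat=32, referenced to the projection
    # scale at lon0=-100, lat0=40.
    #   >>> m.drawmapscale(-105., 32., -100., 40., 1000., barstyle='fancy')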
def colorbar(self,mappable=None,location='right',size="5%",pad='2%',fig=None,ax=None,**kwargs):
"""
Add colorbar to axes associated with a map.
The colorbar axes instance is created using the axes_grid toolkit.
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
mappable the Image, ContourSet, etc. to which the colorbar
applies. Default None, matplotlib.pyplot.gci() is
used to retrieve the current image mappable.
location where to put colorbar ('top','bottom','left','right')
Default 'right'.
size width of colorbar axes (string 'N%', where N is
an integer describing the fractional width of the parent
axes). Default '5%'.
pad Padding between parent axes and colorbar axes in
same units as size. Default '2%'.
fig Figure instance the map axes instance is associated
with. Default None, and matplotlib.pyplot.gcf() is used
to retrieve the current active figure instance.
ax The axes instance which the colorbar will be
associated with. Default None, searches for self.ax,
and if None uses matplotlib.pyplot.gca().
\**kwargs extra keyword arguments passed on to
colorbar method of the figure instance.
============== ====================================================
Returns a matplotlib colorbar instance.
"""
# get current axes instance (if none specified).
ax = ax or self._check_ax()
# get current figure instance (if none specified).
if fig is None or mappable is None:
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
# get current mappable if none specified.
if mappable is None:
mappable = plt.gci()
# create colorbar axes uses axes_grid toolkit.
divider = make_axes_locatable(ax)
if location in ['left','right']:
orientation = 'vertical'
elif location in ['top','bottom']:
orientation = 'horizontal'
else:
raise ValueError('location must be top,bottom,left or right')
cax = divider.append_axes(location, size=size, pad=pad)
# create colorbar.
cb = fig.colorbar(mappable,orientation=orientation,cax=cax,**kwargs)
fig.sca(ax) # reset parent axes as current axes.
return cb
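    # Illustrative usage sketch (cs is a hypothetical ContourSet): attach a
    # horizontal colorbar below the map.
    #   >>> cs = m.contourf(x, y, data, 15)
    #   >>> cb = m.colorbar(cs, location='bottom', pad='5%')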
def nightshade(self,date,color="k",delta=0.25,alpha=0.5,ax=None,zorder=2):
"""
Shade the regions of the map that are in darkness at the time
        specified by ``date``. ``date`` is a datetime instance,
assumed to be UTC.
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
color color to shade night regions (default black).
        delta          day/night terminator is computed with a
                       resolution of ``delta`` degrees (default 0.25).
alpha alpha transparency for shading (default 0.5, so
map background shows through).
zorder zorder for shading (default 2).
============== ====================================================
Extra keyword ``ax`` can be used to override the default axis instance.
returns a matplotlib.contour.ContourSet instance.
"""
from .solar import daynight_grid
# make sure date is utc.
if date.utcoffset() is not None:
raise ValueError('datetime instance must be UTC')
# create grid of day=0, night=1
lons,lats,daynight = daynight_grid(date,delta,self.lonmin,self.lonmax)
x,y = self(lons,lats)
# contour the day-night grid, coloring the night area
# with the specified color and transparency.
CS = self.contourf(x,y,daynight,1,colors=[color],alpha=alpha,ax=ax)
        # set zorder on ContourSet collections so night shading
# is on top.
for c in CS.collections:
c.set_zorder(zorder)
# clip for round polar plots.
if self.round: CS.collections,c = self._clipcircle(ax,CS.collections)
return CS
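    # Illustrative usage sketch: shade the night side of the globe for the
    # current UTC time.
    #   >>> from datetime import datetime
    #   >>> m.nightshade(datetime.utcnow())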
def _check_ax(self):
"""
Returns the axis on which to draw.
Returns self.ax, or if self.ax=None returns plt.gca().
"""
if self.ax is None:
try:
ax = plt.gca()
except:
import matplotlib.pyplot as plt
ax = plt.gca()
# associate an axes instance with this Basemap instance
# the first time this method is called.
#self.ax = ax
else:
ax = self.ax
return ax
def _ax_plt_from_kw(self, kw):
"""
Return (ax, plt), where ax is the current axes, and plt is
None or a reference to the pyplot module.
plt will be None if ax was popped from kw or taken from self.ax;
otherwise, pyplot was used and is returned.
"""
plt = None
_ax = kw.pop('ax', None)
if _ax is None:
_ax = self.ax
if _ax is None:
import matplotlib.pyplot as plt
_ax = plt.gca()
return _ax, plt
def shiftdata(self,lonsin,datain=None,lon_0=None):
"""
Shift longitudes (and optionally data) so that they match map projection region.
Only valid for cylindrical/pseudo-cylindrical global projections and data
on regular lat/lon grids. longitudes and data can be 1-d or 2-d, if 2-d
it is assumed longitudes are 2nd (rightmost) dimension.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lonsin original 1-d or 2-d longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
datain original 1-d or 2-d data. Default None.
        lon_0          center of map projection region. Default None,
given by current map projection.
============== ====================================================
        if datain given, returns ``lonsout,dataout`` (longitudes and data shifted to fit in interval
        [lon_0-180,lon_0+180]), otherwise just returns longitudes. If
transformed longitudes lie outside map projection region, data is
masked and longitudes are set to 1.e30.
"""
if lon_0 is None and 'lon_0' not in self.projparams:
msg='lon_0 keyword must be provided'
raise ValueError(msg)
lonsin = np.asarray(lonsin)
if lonsin.ndim not in [1,2]:
raise ValueError('1-d or 2-d longitudes required')
if datain is not None:
# if it's a masked array, leave it alone.
if not ma.isMA(datain): datain = np.asarray(datain)
if datain.ndim not in [1,2]:
raise ValueError('1-d or 2-d data required')
if lon_0 is None:
lon_0 = self.projparams['lon_0']
# 2-d data.
if lonsin.ndim == 2:
nlats = lonsin.shape[0]
nlons = lonsin.shape[1]
lonsin1 = lonsin[0,:]
lonsin1 = np.where(lonsin1 > lon_0+180, lonsin1-360 ,lonsin1)
lonsin1 = np.where(lonsin1 < lon_0-180, lonsin1+360 ,lonsin1)
londiff = np.abs(lonsin1[0:-1]-lonsin1[1:])
londiff_sort = np.sort(londiff)
thresh = 360.-londiff_sort[-2]
itemindex = nlons-np.where(londiff>=thresh)[0]
# if no shift necessary, itemindex will be
# empty, so don't do anything
if itemindex:
# check to see if cyclic (wraparound) point included
# if so, remove it.
if np.abs(lonsin1[0]-lonsin1[-1]) < 1.e-4:
hascyclic = True
lonsin_save = lonsin.copy()
lonsin = lonsin[:,1:]
if datain is not None:
datain_save = datain.copy()
datain = datain[:,1:]
else:
hascyclic = False
lonsin = np.where(lonsin > lon_0+180, lonsin-360 ,lonsin)
lonsin = np.where(lonsin < lon_0-180, lonsin+360 ,lonsin)
lonsin = np.roll(lonsin,itemindex-1,axis=1)
if datain is not None:
# np.roll works on ndarrays and on masked arrays
datain = np.roll(datain,itemindex-1,axis=1)
# add cyclic point back at beginning.
if hascyclic:
lonsin_save[:,1:] = lonsin
lonsin_save[:,0] = lonsin[:,-1]-360.
lonsin = lonsin_save
if datain is not None:
datain_save[:,1:] = datain
datain_save[:,0] = datain[:,-1]
datain = datain_save
# mask points outside
# map region so they don't wrap back in the domain.
mask = np.logical_or(lonsin<lon_0-180,lonsin>lon_0+180)
lonsin = np.where(mask,1.e30,lonsin)
if datain is not None and mask.any():
# superimpose on existing mask
datain = ma.masked_where(mask, datain)
# 1-d data.
elif lonsin.ndim == 1:
nlons = len(lonsin)
lonsin = np.where(lonsin > lon_0+180, lonsin-360 ,lonsin)
lonsin = np.where(lonsin < lon_0-180, lonsin+360 ,lonsin)
londiff = np.abs(lonsin[0:-1]-lonsin[1:])
londiff_sort = np.sort(londiff)
thresh = 360.-londiff_sort[-2]
itemindex = len(lonsin)-np.where(londiff>=thresh)[0]
if itemindex:
# check to see if cyclic (wraparound) point included
# if so, remove it.
if np.abs(lonsin[0]-lonsin[-1]) < 1.e-4:
hascyclic = True
lonsin_save = lonsin.copy()
lonsin = lonsin[1:]
if datain is not None:
datain_save = datain.copy()
datain = datain[1:]
else:
hascyclic = False
lonsin = np.roll(lonsin,itemindex-1)
if datain is not None:
datain = np.roll(datain,itemindex-1)
# add cyclic point back at beginning.
if hascyclic:
lonsin_save[1:] = lonsin
lonsin_save[0] = lonsin[-1]-360.
lonsin = lonsin_save
if datain is not None:
datain_save[1:] = datain
datain_save[0] = datain[-1]
datain = datain_save
# mask points outside
# map region so they don't wrap back in the domain.
mask = np.logical_or(lonsin<lon_0-180,lonsin>lon_0+180)
lonsin = np.where(mask,1.e30,lonsin)
if datain is not None and mask.any():
datain = ma.masked_where(mask, datain)
if datain is not None:
return lonsin, datain
else:
return lonsin
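    # Illustrative usage sketch (hypothetical arrays): wrap 0-360 longitudes
    # and the associated field into the map's [lon_0-180, lon_0+180] window
    # before interpolating or contouring; note the longitudes are returned
    # first, matching the code above.
    #   >>> lons_shifted, data_shifted = m.shiftdata(lons, datain=data)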
### End of Basemap class
def _searchlist(a,x):
"""
like bisect, but works for lists that are not sorted,
and are not in increasing order.
returns -1 if x does not fall between any two elements"""
# make sure x is a float (and not an array scalar)
x = float(x)
itemprev = a[0]
nslot = -1
eps = 180.
for n,item in enumerate(a[1:]):
if item < itemprev:
if itemprev-item>eps:
if ((x>itemprev and x<=360.) or (x<item and x>=0.)):
nslot = n+1
break
            elif x <= itemprev and x > item:
nslot = n+1
break
else:
if item-itemprev>eps:
if ((x<itemprev and x>=0.) or (x>item and x<=360.)):
nslot = n+1
break
elif x >= itemprev and x < item:
nslot = n+1
break
itemprev = item
return nslot
def interp(datain,xin,yin,xout,yout,checkbounds=False,masked=False,order=1):
"""
Interpolate data (``datain``) on a rectilinear grid (with x = ``xin``
y = ``yin``) to a grid with x = ``xout``, y= ``yout``.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
datain a rank-2 array with 1st dimension corresponding to
y, 2nd dimension x.
xin, yin rank-1 arrays containing x and y of
datain grid in increasing order.
xout, yout rank-2 arrays containing x and y of desired output grid.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
checkbounds If True, values of xout and yout are checked to see
that they lie within the range specified by xin
                   and yin.
If False, and xout,yout are outside xin,yin,
interpolated values will be clipped to values on
boundary of input grid (xin,yin)
Default is False.
masked If True, points outside the range of xin and yin
are masked (in a masked array).
If masked is set to a number, then
points outside the range of xin and yin will be
set to that number. Default False.
order 0 for nearest-neighbor interpolation, 1 for
                   bilinear interpolation, 3 for cubic spline
(default 1). order=3 requires scipy.ndimage.
============== ====================================================
.. note::
If datain is a masked array and order=1 (bilinear interpolation) is
used, elements of dataout will be masked if any of the four surrounding
points in datain are masked. To avoid this, do the interpolation in two
passes, first with order=1 (producing dataout1), then with order=0
(producing dataout2). Then replace all the masked values in dataout1
with the corresponding elements in dataout2 (using numpy.where).
This effectively uses nearest neighbor interpolation if any of the
four surrounding points in datain are masked, and bilinear interpolation
otherwise.
Returns ``dataout``, the interpolated data on the grid ``xout, yout``.
"""
# xin and yin must be monotonically increasing.
if xin[-1]-xin[0] < 0 or yin[-1]-yin[0] < 0:
raise ValueError('xin and yin must be increasing!')
if xout.shape != yout.shape:
raise ValueError('xout and yout must have same shape!')
# check that xout,yout are
# within region defined by xin,yin.
if checkbounds:
if xout.min() < xin.min() or \
xout.max() > xin.max() or \
yout.min() < yin.min() or \
yout.max() > yin.max():
raise ValueError('yout or xout outside range of yin or xin')
# compute grid coordinates of output grid.
delx = xin[1:]-xin[0:-1]
dely = yin[1:]-yin[0:-1]
if max(delx)-min(delx) < 1.e-4 and max(dely)-min(dely) < 1.e-4:
# regular input grid.
xcoords = (len(xin)-1)*(xout-xin[0])/(xin[-1]-xin[0])
ycoords = (len(yin)-1)*(yout-yin[0])/(yin[-1]-yin[0])
else:
# irregular (but still rectilinear) input grid.
xoutflat = xout.flatten(); youtflat = yout.flatten()
ix = (np.searchsorted(xin,xoutflat)-1).tolist()
iy = (np.searchsorted(yin,youtflat)-1).tolist()
xoutflat = xoutflat.tolist(); xin = xin.tolist()
youtflat = youtflat.tolist(); yin = yin.tolist()
xcoords = []; ycoords = []
for n,i in enumerate(ix):
if i < 0:
xcoords.append(-1) # outside of range on xin (lower end)
elif i >= len(xin)-1:
xcoords.append(len(xin)) # outside range on upper end.
else:
xcoords.append(float(i)+(xoutflat[n]-xin[i])/(xin[i+1]-xin[i]))
for m,j in enumerate(iy):
if j < 0:
ycoords.append(-1) # outside of range of yin (on lower end)
elif j >= len(yin)-1:
ycoords.append(len(yin)) # outside range on upper end
else:
ycoords.append(float(j)+(youtflat[m]-yin[j])/(yin[j+1]-yin[j]))
xcoords = np.reshape(xcoords,xout.shape)
ycoords = np.reshape(ycoords,yout.shape)
# data outside range xin,yin will be clipped to
# values on boundary.
if masked:
xmask = np.logical_or(np.less(xcoords,0),np.greater(xcoords,len(xin)-1))
ymask = np.logical_or(np.less(ycoords,0),np.greater(ycoords,len(yin)-1))
xymask = np.logical_or(xmask,ymask)
xcoords = np.clip(xcoords,0,len(xin)-1)
ycoords = np.clip(ycoords,0,len(yin)-1)
# interpolate to output grid using bilinear interpolation.
if order == 1:
xi = xcoords.astype(np.int32)
yi = ycoords.astype(np.int32)
xip1 = xi+1
yip1 = yi+1
xip1 = np.clip(xip1,0,len(xin)-1)
yip1 = np.clip(yip1,0,len(yin)-1)
delx = xcoords-xi.astype(np.float32)
dely = ycoords-yi.astype(np.float32)
dataout = (1.-delx)*(1.-dely)*datain[yi,xi] + \
delx*dely*datain[yip1,xip1] + \
(1.-delx)*dely*datain[yip1,xi] + \
delx*(1.-dely)*datain[yi,xip1]
elif order == 0:
xcoordsi = np.around(xcoords).astype(np.int32)
ycoordsi = np.around(ycoords).astype(np.int32)
dataout = datain[ycoordsi,xcoordsi]
elif order == 3:
try:
from scipy.ndimage import map_coordinates
except ImportError:
raise ValueError('scipy.ndimage must be installed if order=3')
coords = [ycoords,xcoords]
dataout = map_coordinates(datain,coords,order=3,mode='nearest')
else:
raise ValueError('order keyword must be 0, 1 or 3')
if masked and isinstance(masked,bool):
dataout = ma.masked_array(dataout)
newmask = ma.mask_or(ma.getmask(dataout), xymask)
dataout = ma.masked_array(dataout,mask=newmask)
elif masked and is_scalar(masked):
dataout = np.where(xymask,masked,dataout)
return dataout
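# Illustrative helper (not part of the original API): a minimal sketch of the
# two-pass recipe described in the interp docstring above for masked input
# arrays, assuming numpy (np), numpy.ma (ma) and interp are available in this
# module's namespace.
def _interp_bilinear_nn_fallback(datain, xin, yin, xout, yout):
    # first pass: bilinear interpolation; output points whose four
    # surrounding input points include a masked value come out masked.
    dataout1 = interp(datain, xin, yin, xout, yout, masked=True, order=1)
    # second pass: nearest-neighbor interpolation, which never spreads the
    # input mask to extra output points.
    dataout2 = interp(datain, xin, yin, xout, yout, masked=True, order=0)
    # fall back to the nearest-neighbor value wherever the bilinear result
    # is masked (points outside the input grid stay masked in both passes).
    return ma.where(ma.getmaskarray(dataout1), dataout2, dataout1)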
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if ma.isMA(datain):
dataout = ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if ma.isMA(lonsin):
lonsout = ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
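# Illustrative usage sketch (hypothetical arrays): remap a global field stored
# on 0..360 longitudes onto -180..180, e.g. before plotting on a map centered
# on the Greenwich meridian.
#   >>> dataout, lonsout = shiftgrid(180., datain, lonsin, start=False)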
def addcyclic(arrin,lonsin):
"""
``arrout, lonsout = addcyclic(arrin, lonsin)``
adds cyclic (wraparound) point in longitude to ``arrin`` and ``lonsin``,
assumes longitude is the right-most dimension of ``arrin``.
"""
nlons = arrin.shape[-1]
newshape = list(arrin.shape)
newshape[-1] += 1
if ma.isMA(arrin):
arrout = ma.zeros(newshape,arrin.dtype)
else:
arrout = np.zeros(newshape,arrin.dtype)
arrout[...,0:nlons] = arrin[:]
arrout[...,nlons] = arrin[...,0]
if ma.isMA(lonsin):
lonsout = ma.zeros(nlons+1,lonsin.dtype)
else:
lonsout = np.zeros(nlons+1,lonsin.dtype)
lonsout[0:nlons] = lonsin[:]
lonsout[nlons] = lonsin[-1] + lonsin[1]-lonsin[0]
return arrout,lonsout
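# Illustrative usage sketch (hypothetical arrays and map instance m): append
# the wraparound longitude before contouring a global field so there is no
# gap at the dateline.
#   >>> data, lons = addcyclic(data, lons)
#   >>> lons2d, lats2d = np.meshgrid(lons, lats)
#   >>> x, y = m(lons2d, lats2d)
#   >>> cs = m.contourf(x, y, data, 15)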
def _choosecorners(width,height,**kwargs):
"""
private function to determine lat/lon values of projection region corners,
given width and height of projection region in meters.
"""
p = pyproj.Proj(kwargs)
urcrnrlon, urcrnrlat = p(0.5*width,0.5*height, inverse=True)
llcrnrlon, llcrnrlat = p(-0.5*width,-0.5*height, inverse=True)
corners = llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat
# test for invalid projection points on output
if llcrnrlon > 1.e20 or urcrnrlon > 1.e20:
raise ValueError('width and/or height too large for this projection, try smaller values')
else:
return corners
def maskoceans(lonsin,latsin,datain,inlands=True,resolution='l',grid=5):
"""
    mask data (``datain``), defined on a grid with latitudes ``latsin``
    and longitudes ``lonsin``, so that points over water will not be plotted.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lonsin, latsin rank-2 arrays containing longitudes and latitudes of
grid.
datain rank-2 input array on grid defined by ``lonsin`` and
``latsin``.
    inlands        if False, mask only ocean points and not inland
lakes (Default True).
resolution gshhs coastline resolution used to define land/sea
mask (default 'l', available 'c','l','i','h' or 'f')
grid land/sea mask grid spacing in minutes (Default 5;
10, 2.5 and 1.25 are also available).
============== ====================================================
returns a masked array the same shape as datain with "wet" points masked.
"""
# read in land/sea mask.
lsmask_lons, lsmask_lats, lsmask =\
_readlsmask(lakes=inlands,resolution=resolution,grid=grid)
# nearest-neighbor interpolation to output grid.
lsmasko = interp(lsmask,lsmask_lons,lsmask_lats,lonsin,latsin,masked=True,order=0)
# mask input data.
mask = lsmasko == 0
return ma.masked_array(datain,mask=mask)
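# Illustrative usage sketch (hypothetical arrays): mask ocean and lake points
# of a gridded field before plotting, using the default 5-minute land/sea
# mask.
#   >>> lons2d, lats2d = np.meshgrid(lons, lats)
#   >>> data_land = maskoceans(lons2d, lats2d, data, resolution='l', grid=5)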
def _readlsmask(lakes=True,resolution='l',grid=5):
# read in land/sea mask.
if grid == 10:
nlons = 2160
elif grid == 5:
nlons = 4320
elif grid == 2.5:
nlons = 8640
elif grid == 1.25:
nlons = 17280
else:
raise ValueError('grid for land/sea mask must be 10,5,2.5 or 1.25')
nlats = nlons/2
import gzip
lsmaskf =\
gzip.open(os.path.join(basemap_datadir,'lsmask_%smin_%s.bin' %\
(grid,resolution)), 'rb')
lsmask =\
np.reshape(np.fromstring(lsmaskf.read(),dtype=np.uint8),(nlats,nlons))
if lakes:
lsmask =\
np.where(lsmask==2,np.array(0,dtype=np.uint8),lsmask)
lsmaskf.close()
delta = 360./nlons
lsmask_lons = np.linspace(-180+0.5*delta,180-0.5*delta,nlons).astype(np.float32)
lsmask_lats = np.linspace(-90+0.5*delta,90-0.5*delta,nlats).astype(np.float32)
return lsmask_lons, lsmask_lats, lsmask
class _tup(tuple):
# tuple with an added remove method.
# used for objects returned by drawparallels and drawmeridians.
def remove(self):
for item in self:
for x in item:
x.remove()
class _dict(dict):
# override __delitem__ to first call remove method on values.
def __delitem__(self,key):
self[key].remove()
super(_dict, self).__delitem__(key)
def _setlonlab(fmt,lon,labelstyle):
# set lon label string (called by Basemap.drawmeridians)
try: # fmt is a function that returns a formatted string
lonlab = fmt(lon)
except: # fmt is a format string.
if lon>180:
if rcParams['text.usetex']:
if labelstyle=='+/-':
lonlabstr = r'${\/-%s\/^{\circ}}$'%fmt
else:
lonlabstr = r'${%s\/^{\circ}\/W}$'%fmt
else:
if labelstyle=='+/-':
lonlabstr = u'-%s\N{DEGREE SIGN}'%fmt
else:
lonlabstr = u'%s\N{DEGREE SIGN}W'%fmt
lonlab = lonlabstr%np.fabs(lon-360)
elif lon<180 and lon != 0:
if rcParams['text.usetex']:
if labelstyle=='+/-':
lonlabstr = r'${\/+%s\/^{\circ}}$'%fmt
else:
lonlabstr = r'${%s\/^{\circ}\/E}$'%fmt
else:
if labelstyle=='+/-':
lonlabstr = u'+%s\N{DEGREE SIGN}'%fmt
else:
lonlabstr = u'%s\N{DEGREE SIGN}E'%fmt
lonlab = lonlabstr%lon
else:
if rcParams['text.usetex']:
lonlabstr = r'${%s\/^{\circ}}$'%fmt
else:
lonlabstr = u'%s\N{DEGREE SIGN}'%fmt
lonlab = lonlabstr%lon
return lonlab
def _setlatlab(fmt,lat,labelstyle):
# set lat label string (called by Basemap.drawparallels)
try: # fmt is a function that returns a formatted string
latlab = fmt(lat)
except: # fmt is a format string.
if lat<0:
if rcParams['text.usetex']:
if labelstyle=='+/-':
latlabstr = r'${\/-%s\/^{\circ}}$'%fmt
else:
latlabstr = r'${%s\/^{\circ}\/S}$'%fmt
else:
if labelstyle=='+/-':
latlabstr = u'-%s\N{DEGREE SIGN}'%fmt
else:
latlabstr = u'%s\N{DEGREE SIGN}S'%fmt
latlab = latlabstr%np.fabs(lat)
elif lat>0:
if rcParams['text.usetex']:
if labelstyle=='+/-':
latlabstr = r'${\/+%s\/^{\circ}}$'%fmt
else:
latlabstr = r'${%s\/^{\circ}\/N}$'%fmt
else:
if labelstyle=='+/-':
latlabstr = u'+%s\N{DEGREE SIGN}'%fmt
else:
latlabstr = u'%s\N{DEGREE SIGN}N'%fmt
latlab = latlabstr%lat
else:
if rcParams['text.usetex']:
latlabstr = r'${%s\/^{\circ}}$'%fmt
else:
latlabstr = u'%s\N{DEGREE SIGN}'%fmt
latlab = latlabstr%lat
return latlab
|
gpl-3.0
|
mrshu/scikit-learn
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
2
|
5858
|
"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to
the presence of outliers in the data set. In such a case, it is
better to use a robust estimator of covariance to guarantee that
the estimation is resistant to "erroneous" observations in the data
set.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_samples-n_features-1}{2}` outliers) estimator of
covariance. The idea is to find :math:`\frac{n_samples+n_features+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance. After a correction
step aiming at compensating the fact the the estimates were learnt
from only a portion of the initial data, we end up with robust
estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
In this example, we compare the estimation errors that are made when
using three types of location and covariance estimates on contaminated
gaussian distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, which has a low error provided n_samples > 5 * n_features
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
[1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
[2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
"""
print __doc__
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
# generate data
X = np.random.randn(n_samples, n_features)
# add some outliers
outliers_index = np.random.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
S = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(S.location_ ** 2)
err_cov_mcd[i, j] = S.error_norm(np.eye(n_features))
# compare estimators learnt from the full data set with true parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learnt from a pure data set
# (i.e. "perfect" MCD)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
pl.subplot(2, 1, 1)
pl.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
pl.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
pl.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
pl.title("Influence of outliers on the location estimation")
pl.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
pl.legend(loc="upper left", prop=font_prop)
pl.subplot(2, 1, 2)
x_size = range_n_outliers.size
pl.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (MCD)", color='m')
pl.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
pl.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
pl.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
pl.title("Influence of outliers on the covariance estimation")
pl.xlabel("Amount of contamination (%)")
pl.ylabel("RMSE")
pl.legend(loc="upper center", prop=font_prop)
pl.show()
|
bsd-3-clause
|
pxzhang94/GAN
|
GAN/improved_wasserstein_gan/wgan_gp_tensorflow.py
|
1
|
3321
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
mb_size = 32
X_dim = 784
z_dim = 10
h_dim = 128
lam = 10
n_disc = 5
lr = 1e-4
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
def plot(samples):
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
return fig
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
X = tf.placeholder(tf.float32, shape=[None, X_dim])
D_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
z = tf.placeholder(tf.float32, shape=[None, z_dim])
G_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
def sample_z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
def G(z):
G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
G_prob = tf.nn.sigmoid(G_log_prob)
return G_prob
def D(X):
D_h1 = tf.nn.relu(tf.matmul(X, D_W1) + D_b1)
out = tf.matmul(D_h1, D_W2) + D_b2
return out
G_sample = G(z)
D_real = D(X)
D_fake = D(G_sample)
# WGAN-GP gradient penalty: evaluate the critic on random interpolates
# between real and generated samples and penalize deviations of the
# critic's gradient norm from 1.
eps = tf.random_uniform([mb_size, 1], minval=0., maxval=1.)
X_inter = eps*X + (1. - eps)*G_sample
grad = tf.gradients(D(X_inter), [X_inter])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad)**2, axis=1))
# penalty is the mean squared deviation of the gradient norm from 1:
# lam * E[(||grad D(x_inter)|| - 1)^2]
grad_pen = lam * tf.reduce_mean((grad_norm - 1.)**2)

# critic (discriminator) and generator losses for the Wasserstein objective
D_loss = tf.reduce_mean(D_fake) - tf.reduce_mean(D_real) + grad_pen
G_loss = -tf.reduce_mean(D_fake)
D_solver = (tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
.minimize(D_loss, var_list=theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
.minimize(G_loss, var_list=theta_G))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not os.path.exists('out/'):
os.makedirs('out/')
i = 0
for it in range(1000000):
for _ in range(n_disc):
X_mb, _ = mnist.train.next_batch(mb_size)
_, D_loss_curr = sess.run(
[D_solver, D_loss],
feed_dict={X: X_mb, z: sample_z(mb_size, z_dim)}
)
_, G_loss_curr = sess.run(
[G_solver, G_loss],
feed_dict={z: sample_z(mb_size, z_dim)}
)
if it % 1000 == 0:
print('Iter: {}; D loss: {:.4}; G_loss: {:.4}'
.format(it, D_loss_curr, G_loss_curr))
if it % 1000 == 0:
samples = sess.run(G_sample, feed_dict={z: sample_z(16, z_dim)})
fig = plot(samples)
plt.savefig('out/{}.png'
.format(str(i).zfill(3)), bbox_inches='tight')
i += 1
plt.close(fig)
|
apache-2.0
|
laluferu/hw_6
|
burgers/gif.py
|
1
|
1076
|
#! /usr/bin/python
# coding: utf-8
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
import numpy as np  # Import the numpy module.
import matplotlib.pyplot as plt  # Import the plotting module.
# Load the data file produced by the Burgers solver.
tray = np.genfromtxt("archivo.dat", delimiter=",")
x = tray[:, 0]
y = tray[:, 1]
u = tray[:, 2]
v = tray[:, 3]
# GIF generator based on matplotlib's animation module: it relies on the
# functions init() and instante(), and on the frame count `repeticiones`,
# which are assumed to be provided by the accompanying exercise/solver
# script, to build the gif from the two functions that evaluate the
# conditions of the exercise.
fig = plt.figure(figsize=(10, 6), dpi=100)
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x, y)
w1 = ax.plot_wireframe(X, Y, u)
w2 = ax.plot_wireframe(X, Y, v)
tplot = plt.imshow(w1, cmap='gist_heat', vmin=0, vmax=1, interpolation='None')
def animate(i):
    if i == 0:
        init()
    else:
        instante()
    tplot.set_array(w1)
animacion = animation.FuncAnimation(fig, animate, repeticiones, interval=100, blit=False)
animacion.save('gif.gif', writer='imagemagick', fps=6, dpi=50)
plt.show()
|
mit
|
kramerfelix/accpy
|
accpy/visualize/plot.py
|
1
|
29849
|
# -*- coding: utf-8 -*-
''' accpy.visualize.plot
author: felix.kramer(at)physik.hu-berlin.de
'best' : 0, (only implemented for axes legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
'''
from __future__ import division
from numpy import (nanmax, nanmin, concatenate, empty, linspace, array, arange,
abs as npabs, sqrt, sin, cos, arccos as acos, nanmean)
from itertools import product
from matplotlib.figure import Figure
from matplotlib.pyplot import cm
from matplotlib.gridspec import GridSpec
from .stringformat import SI, SId
from .lattice import drawlattice
from ..simulate import const
from ..simulate.rmatrices import UCS2R
from ..simulate.tracking import trackpart
def plot(ax, x, y, ls, xlabel, xunit, ylabel, yunit, label, col=False,
setlim=True, rescaleX=True, rescaleY=True, xprefix=None, mx=None,
yprefix=None, my=None):
if xprefix is None:
xprefix, mx = SId(nanmean(abs(x)))
if yprefix is None:
yprefix, my = SId(nanmean(abs(y)))
if rescaleX:
        x = x/mx  # careful! numpy.ndarrays are mutable!
if xunit != '':
xunit = ' / ('+xprefix+xunit+')'
elif xunit != '':
xunit = ' / ('+xunit+')'
if rescaleY:
y = y/my
if yunit != '':
yunit = ' / ('+yprefix+yunit+')'
elif yunit != '':
yunit = ' / ('+yunit+')'
if col is False:
ax.plot(x, y, ls, label=label)
else:
ax.plot(x, y, ls, color=col, label=label)
if xlabel != '':
ax.set_xlabel(xlabel+xunit)
if ylabel != '':
ax.set_ylabel(ylabel+yunit)
if setlim:
epsy = (max(y)-min(y))*0.15
ax.set_xlim([min(x), max(x)])
ax.set_ylim([min(y)-epsy, max(y)+epsy])
return x, y, yprefix, my
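# Illustrative usage sketch (hypothetical axis and arrays): plot a time trace
# with automatic SI-prefix rescaling of both axes.
#   >>> fig = Figure()
#   >>> ax = fig.add_subplot(1, 1, 1)
#   >>> plot(ax, t, current, '-r', 'time', 's', 'beam current', 'A', 'I(t)')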
def Mplot(ax, x, ys, lss, xlabel, xunit, ylabel, yunit, labels, rescaleX=True, rescaleY=True):
colors = getcolors(len(ys))
xprefix, mx = SId(nanmean(x))
yprefix, my = SId(nanmean(ys))
if rescaleX:
if xunit != '':
xunit = ' / ('+xprefix+xunit+')'
elif xunit != '':
xunit = ' / ('+xunit+')'
if rescaleY:
if yunit != '':
yunit = ' / ('+yprefix+yunit+')'
elif yunit != '':
yunit = ' / ('+yunit+')'
if labels == '':
labels = ['' for i in range(len(ys))]
if type(x) != type([]):
xs = [x for i in range(len(ys))]
else:
xs = x
for x, y, ls, lab, col in zip(xs, ys, lss, labels, colors):
if rescaleY:
y = y/my
if rescaleX:
            x = x/mx  # careful! numpy.ndarrays are mutable!
ax.plot(x, y, ls, color=col, label=lab)
if xlabel != '':
ax.set_xlabel(xlabel+xunit)
if ylabel != '':
ax.set_ylabel(ylabel+yunit)
ax.set_xlim([min(x), max(x)])
return mx, my
def legplot(ax, linestyles, labels, loc=0):
colors = getcolors(len(labels))
for ls, lab, col in zip(linestyles, labels, colors):
ax.plot([], [], ls, label=lab, color=col)
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.legend(fancybox=True, loc=loc)
return
def getcolors(number):
return list(cm.rainbow(linspace(0, 1, number)))
def plotoptic(UC, diagnostics, s, xtwiss, ytwiss, xdisp):
figs = [Figure() for i in range(4)]
ax = [figs[i].add_subplot(1, 1, 1) for i in range(4)]
drawlattice(ax[0], UC, diagnostics, [xtwiss[0, 0, :], xtwiss[0, 1, :]], 0)
ax[0].plot(s, xtwiss[0, 0, :], '-r', label=r'$\beta_x$')
ax[0].plot(s, -xtwiss[0, 1, :], '-c', label=r'$\alpha_x$')
ax[0].plot([], [], '-m', label=r'$\gamma_x$')
ax[0].set_ylabel(r'betatron function $\beta_x$ / (m)')
ax2 = ax[0].twinx()
ax2.plot(s, xtwiss[1, 1, :], '-m')
ax2.set_ylabel(r'gamma function $\gamma_x$ / (m)', color='m')
ax2.tick_params(axis='y', colors='m')
drawlattice(ax[1], UC, diagnostics, [ytwiss[0, 0, :], ytwiss[0, 1, :]], 0)
ax[1].plot(s, ytwiss[0, 0, :], '-b', label=r'$\beta_y$')
ax[1].plot(s, -ytwiss[0, 1, :], '-c', label=r'$\alpha_y$')
ax[1].plot([], [], '-m', label=r'$\gamma_y$')
ax[1].set_ylabel(r'betatron function $\beta_y$ / (m)')
ax2 = ax[1].twinx()
ax2.plot(s, ytwiss[1, 1, :], '-m', label=r'$\gamma_y$')
ax2.set_ylabel(r'gamma function $\gamma_y$ / (m)', color='m')
ax2.tick_params(axis='y', colors='m')
drawlattice(ax[2], UC, diagnostics, [xdisp[0, :]], 0)
ax[2].plot(s, xdisp[0, :], '-g', label=r'$D_x$')
ax[2].plot([], [], '-m', label=r'$D_x^\prime$')
ax[2].set_ylabel(r'dispersion function $D_x$ / (m)')
ax2 = ax[2].twinx()
ax2.plot(s, xdisp[1, :], '-m', label=r'$D_x^\prime$')
ax2.set_ylabel(r'derived dispersion function $D_x^\prime$ / (m)', color='m')
ax2.tick_params(axis='y', colors='m')
drawlattice(ax[3], UC, diagnostics, [xtwiss[0, 0, :], ytwiss[0, 0, :]], 0)
ax[3].plot(s, xtwiss[0, 0, :], '-r', label=r'$\beta_x$')
ax[3].plot(s, ytwiss[0, 0, :], '-b', label=r'$\beta_y$')
ax[3].plot([], [], '-g', label=r'$D_x$')
ax[3].set_ylabel(r'betatron function $\beta_{x,y}$ / (m)')
ax2 = ax[3].twinx()
ax2.plot(s, xdisp[0, :], '-g', label=r'$D_x$')
ax2.set_ylabel(r'dispersion function $D_x$ / (m)', color='g')
ax2.tick_params(axis='y', colors='g')
[ax[i].set_xlabel(r'orbit position s / (m)') for i in range(4)]
[ax[i].set_xlim([0, nanmax(s)]) for i in range(4)]
legs = [ax[i].legend(fancybox=True, loc=0) for i in range(4)]
[legs[i].get_frame().set_alpha(0.5) for i in range(4)]
return figs
def plotbeamsigma(UC, diagnostics, s, sigx, sigy):
fig = Figure()
ax = fig.add_subplot(1, 1, 1)
rel = abs(nanmean(sigy)/nanmean(sigx))
if rel > 100 or rel < 1e-2:
drawlattice(ax, UC, diagnostics, [sigx], 0)
ax.plot(s, sigx, '-r', label=r'$\sigma_x$')
ax.plot([], [], '-b', label=r'$\sigma_y$')
ax.set_ylabel(r'Beam extent $\sigma_x$ / (m)')
ax.set_xlabel(r'orbit position s / (m)')
ax2 = ax.twinx()
ax2.plot(s, sigy, '-b')
ax2.tick_params(axis='y', colors='b')
ax2.set_ylabel(r'Beam extent $\sigma_y$ / (m)', color='b')
else:
drawlattice(ax, UC, diagnostics, [sigx, sigy], 0)
ax.plot(s, sigx, '-r', label=r'$\sigma_x$')
ax.plot(s, sigy, '-b', label=r'$\sigma_y$')
ax.set_xlabel(r'orbit position s / (m)')
ax.set_ylabel(r'Beam extent $\sigma_u$ / (m)')
ax.set_xlim([min(s), max(s)])
leg = ax.legend(fancybox=True, loc=2)
leg.get_frame().set_alpha(0.5)
return fig
def plotopticpars_closed(xtwiss, xdisp, ytwiss, gamma, Qx, Xx, Jx, emiteqx,
tau_x, Qy, Xy, Jy, E, emiteqy, tau_y, alpha_mc,
eta_mc, gamma_tr, Q_s, Js, sigma_E, sigma_tau,
sigma_s, tau_s, U_rad, P_ges, E_c, lambda_c):
fig = Figure()
ax = fig.add_subplot(1, 1, 1)
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
radpars = ''.join(
[r'------------------------------------''\n',
r'Radial parameters''\n',
r'------------------------------------''\n',
r'$\beta_{x,max} = %g m$''\n' % nanmax(xtwiss[0, 0, :]),
r'$\beta_{x,min} = %g m$''\n' % nanmin(xtwiss[0, 0, :]),
r'$\alpha_{x,max} = %g$''\n' % nanmax(-xtwiss[0, 1, :]),
r'$\alpha_{x,min} = %g$''\n' % nanmin(-xtwiss[0, 1, :]),
r'$\gamma_{x,max} = %g$''\n' % nanmax(xtwiss[1, 1, :]),
r'$\gamma_{x,min} = %g$''\n' % nanmin(xtwiss[1, 1, :]),
r'$D_{x,max} = %g m$''\n' % nanmax(xdisp[0, :]),
r'$D_{x,min} = %g m$''\n' % nanmin(xdisp[0, :]),
r'$D_{x,max}^\prime = %g$''\n' % nanmax(xdisp[1, :]),
r'$D_{x,min}^\prime = %g$''\n' % nanmin(xdisp[1, :]),
r'$Q_x = %g$''\n' % Qx,
r'$\xi_{x,nat} = %g$''\n' % Xx,
r'$J_x = %g$''\n' % Jx,
r'$\epsilon_x = %g \pi rad m$''\n' % emiteqx,
r'$\tau_x = %e s$' % tau_x])
axipars = ''.join(
[r'------------------------------------''\n',
r'Axial parameters''\n',
r'------------------------------------''\n',
r'$\beta_{y,max} = %g m$''\n' % nanmax(ytwiss[0, 0, :]),
r'$\beta_{y,min} = %g m$''\n' % nanmin(ytwiss[0, 0, :]),
r'$\alpha_{y,max} = %g$''\n' % nanmax(-ytwiss[0, 1, :]),
r'$\alpha_{y,min} = %g$''\n' % nanmin(-ytwiss[0, 1, :]),
r'$\gamma_{x,max} = %g$''\n' % nanmax(ytwiss[1, 1, :]),
r'$\gamma_{x,min} = %g$''\n' % nanmin(ytwiss[1, 1, :]),
r'$Q_y = %g$''\n' % Qy,
r'$\xi_{y,nat} = %g$''\n' % Xy,
r'$J_y = %g$''\n' % Jy,
r'$\epsilon_y = %g \pi rad m$''\n' % emiteqy,
r'$\tau_y = %e s$' % tau_y])
lonpars = ''.join(
[r'------------------------------------''\n',
r'Longitudinal parameters''\n',
r'------------------------------------''\n',
r'$E = %g eV$''\n' % E,
r'$\gamma_{lorentz} = %g$''\n' % gamma,
r'$\alpha_{p} = %g $''\n' % alpha_mc,
r'$\eta_{slip} = %g $''\n' % eta_mc,
r'$\gamma_{tr} = %g $''\n' % gamma_tr,
r'$Q_s = %g$''\n' % Q_s,
r'$J_s = %g$''\n' % Js,
r'$\sigma_{E} = %e \%% $''\n' % (sigma_E*100),
r'$\sigma_{\tau} = %g s$''\n' % sigma_tau,
r'$\sigma_{s} = %g m$''\n' % sigma_s,
r'$\tau_{s} = %e s$''\n' % tau_s,
r'$E_{loss} = %g eV$''\n' % U_rad,
r'$P_{rad} = %g W$''\n' % P_ges,
r'$E_{crit} = %g eV$''\n' % E_c,
r'$\lambda_{crit} = %g m$' % lambda_c])
ax.text(0.025, 0.975, radpars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
ax.text(0.35, 0.975, axipars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
ax.text(0.675, 0.975, lonpars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
return fig
def plotopticpars_open(xtwiss, xdisp, ytwiss, gamma, E):
fig = Figure()
ax = fig.add_subplot(1, 1, 1)
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
radpars = ''.join(
[r'------------------------------------''\n',
r'Radial parameters''\n',
r'------------------------------------''\n',
r'$\beta_{x,max} = %g m$''\n' % nanmax(xtwiss[0, 0, :]),
r'$\beta_{x,min} = %g m$''\n' % nanmin(xtwiss[0, 0, :]),
r'$\alpha_{x,max} = %g$''\n' % nanmax(-xtwiss[0, 1, :]),
r'$\alpha_{x,min} = %g$''\n' % nanmin(-xtwiss[0, 1, :]),
r'$\gamma_{x,max} = %g$''\n' % nanmax(xtwiss[1, 1, :]),
r'$\gamma_{x,min} = %g$''\n' % nanmin(xtwiss[1, 1, :]),
r'$D_{x,max} = %g m$''\n' % nanmax(xdisp[0, :]),
r'$D_{x,min} = %g m$''\n' % nanmin(xdisp[0, :]),
r'$D_{x,max}^\prime = %g$''\n' % nanmax(xdisp[1, :]),
r'$D_{x,min}^\prime = %g$' % nanmin(xdisp[1, :])])
axipars = ''.join(
[r'------------------------------------''\n',
r'Axial parameters''\n',
r'------------------------------------''\n',
r'$\beta_{y,max} = %g m$''\n' % nanmax(ytwiss[0, 0, :]),
r'$\beta_{y,min} = %g m$''\n' % nanmin(ytwiss[0, 0, :]),
r'$\alpha_{y,max} = %g$''\n' % nanmax(-ytwiss[0, 1, :]),
r'$\alpha_{y,min} = %g$''\n' % nanmin(-ytwiss[0, 1, :]),
r'$\gamma_{x,max} = %g$''\n' % nanmax(ytwiss[1, 1, :]),
r'$\gamma_{x,min} = %g$' % nanmin(ytwiss[1, 1, :])])
lonpars = ''.join(
[r'------------------------------------''\n',
r'Longitudinal parameters''\n',
r'------------------------------------''\n',
r'$E = %g eV$''\n' % E,
r'$\gamma_{lorentz} = %g$' % gamma])
ax.text(0.025, 0.975, radpars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
ax.text(0.35, 0.975, axipars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
ax.text(0.675, 0.975, lonpars, horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes)
return fig
def plotdisptraj(s, P_UCS, E, E0, UCS, UC, diagnostics):
    # measured energy-dependent offset at FOMS normalized to 0 for EbE0=1
xf1t = lambda EbE0: -.078269*EbE0 + .078269 # + .059449
xf2t = lambda EbE0: -.241473*EbE0 + .241473 # + .229314
xf6t = lambda EbE0: 1.174523*EbE0 - 1.174523 # - 1.196090
xf7t = lambda EbE0: .998679*EbE0 - .998679 # - 1.018895
xf8t = lambda EbE0: .769875*EbE0 - .769875 # - .787049
steps = 6
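    # track the 6 off-momentum particles (relative offsets dEbE) through the unit cell; trajectories are converted to mm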
X = [empty([6, P_UCS+1]) for i in range(steps)]
dEbE = linspace(-0.005, 0.005, steps)
for deltaE, i in zip(dEbE, range(steps)):
# R calculated for every energy (not necessary)
gamma = (E+deltaE*E)/E0+1
R = UCS2R(P_UCS, UCS, gamma)
X[i][:, 0] = array([0, 0, 0, 0, 0, deltaE])
X[i] = trackpart(X[i], R, P_UCS, P_UCS)*1e3
fig = Figure()
ax = fig.add_subplot(1, 1, 1)
drawlattice(ax, UC, diagnostics, X, 0)
ax.set_xlabel(r'orbit position s / (m)')
ax.set_ylabel(r'radial displacement / (mm)')
x = [s[UCS[0, :] == 7][i] for i in [0, 1, 5, 6, 7]]
color = iter(cm.rainbow(linspace(0, 1, steps)))
for i in range(steps):
c = next(color)
EE0 = 1 + dEbE[i]
y = array([xf1t(EE0), xf2t(EE0), xf6t(EE0), xf7t(EE0), xf8t(EE0)])*1e3
ax.plot(x, y, 'o', c=c)
ax.plot(s, X[i][0, :], c=c, label=r'$\delta={:g}$\textperthousand'.format(dEbE[i]*1e3))
ax.plot([], [], 'ok', label=r'measured')
#ax.get_xaxis().set_visible(False)
#leg = ax.legend(fancybox=True, loc=0)
#leg.get_frame().set_alpha(0.5)
ax.set_xlim([0, nanmax(s)])
return fig
def plottrajs(s, X, rounds, envelope):
figs = [Figure() for i in range(7)]
ax1 = [figs[i].add_subplot(1, 1, 1) for i in range(6)]
GS = GridSpec(5, 3)
ax2 = [figs[6].add_subplot(GS[:2, i]) for i in range(3)]
ax2 += [figs[6].add_subplot(GS[3:, i]) for i in range(3)]
ax3 = figs[6].add_subplot(GS[2, :])
ylabs = [r'$x$ radial displacement / (mm)',
r'$x^\prime$ radial direction deviation / (mrad)',
r'$y$ axial displacement / (mm)',
r'$y^\prime$ axial direction deviation / (mrad)',
r'$l$ longitudinal displacement / (mm)',
r'$\frac{\Delta P}{P_0}$ longitudinal momentum deviation']
y2labs = [r'$x$ / (mm)',
r'$x^\prime$ / (mrad)',
r'$y$ / (mm)',
r'$y^\prime$ / (mrad)',
r'$l$ / (mm)',
r'$\frac{\Delta P}{P_0}$ / \textperthousand']
for i in range(6):
ax1[i].set_xlabel(r'orbit position s / (m)')
ax1[i].set_ylabel(ylabs[i])
ax2[i].set_xlabel(r'orbit position s / (m)')
ax2[i].set_ylabel(y2labs[i])
color = iter(cm.rainbow(linspace(0, 1, 6)))
labs = [r'Ideal particle',
r'1 sigma particle',
            r'Envelope $E_{x,y}(s)=\sqrt{\epsilon_{x,y}\beta_{x,y}(s)+(\delta_ED_{x,y}(s))^2}$',
r'Envelope $E_{x}(s)=\sqrt{\epsilon_{x}\beta_{x}+(\delta_ED_{x}(s))^2}$',
r'Envelope $E_{y}(s)=\sqrt{\epsilon_{y}\beta_{y}}$',
r'Ensemble']
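    # i indexes the phase-space coordinate (x, x', y, y', l, dP/P0); j places it in the 2x3 overview grid so the columns pair x/x', y/y' and l/dP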
for i, j in zip(range(6), [0, 3, 1, 4, 2, 5]):
c = next(color)
y = []
y_ideal = []
y_sigma = []
for partN, traj in enumerate(X):
for k in range(rounds):
index = arange(len(s))+(len(s)-1)*k
if partN > 1:
y.append(traj[i, index]*1e3)
elif partN == 1:
y_sigma.append(traj[i, index]*1e3)
elif partN == 0:
y_ideal.append(traj[i, index]*1e3)
# ensemble trajectories
[ax1[i].plot(s, y[l], '-', c=c) for l in range(len(y))]
[ax2[j].plot(s, y[l], '-', c=c) for l in range(len(y))]
ax1[i].plot([], [], '-', c=c, label=labs[5])
ax2[j].plot([], [], '-', c=c, label=labs[5])
# 1-sigma particle trajectories
ax1[i].plot(s, y_sigma[0], '-b', label=labs[1])
ax2[j].plot(s, y_sigma[0], '-b')
[ax1[i].plot(s, y_sigma[l], '-b') for l in range(1, len(y_sigma))]
[ax2[j].plot(s, y_sigma[l], '-b') for l in range(1, len(y_sigma))]
# ideal particle trajectories
ax1[i].plot(s, y_ideal[0], '-k', label=labs[0])
if i == 0:
ax1[i].plot([], [], '-r', label=labs[3])
elif i == 2:
ax1[i].plot([], [], '-r', label=labs[4])
ax2[j].plot(s, y_ideal[0], '-k')
leg = ax1[i].legend(fancybox=True, loc='upper right')
leg.get_frame().set_alpha(0.5)
ax1[0].plot(s, envelope[0, :], '-r', s, -envelope[0, :], '-r')
ax1[2].plot(s, envelope[1, :], '-r', s, -envelope[1, :], '-r')
ax2[0].plot(s, envelope[0, :], '-r', s, -envelope[0, :], '-r')
ax2[2].plot(s, envelope[1, :], '-r', s, -envelope[1, :], '-r')
legplot(ax3, ['-k', '-b', '-r'], labs[:3], loc=10)
return figs
def plotphasespace(s, X, rounds, xtwiss, emittx, ytwiss, emitty):
fig = Figure()
xlabels = [r'$x$ / (mm)',
r'$y$ / (mm)']
ylabels = [r'$x^\prime$ / (mrad)',
r'$y^\prime$ / (mrad)']
titles = [r'Radial phasespace',
r'Axial phasespace']
axmax = []
def roundplot(traj, ax, linestyle, label=''):
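        # plot the particle coordinates once per revolution; the index advances by the number of lattice points per turn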
for j in range(rounds):
index = (len(s)-1)*j
x = traj[i*2, index]*1e3
y = traj[i*2+1, index]*1e3
if j == 0:
ax.plot(x, y, linestyle, label=label)
else:
ax.plot(x, y, linestyle)
axmax.append(max(npabs([x, y])))
GS = GridSpec(1, 5)
ax = []
ax.append(fig.add_subplot(GS[0, :2]))
ax.append(fig.add_subplot(GS[0, -2:]))
ax2 = fig.add_subplot(GS[0, 2])
for i in range(2):
ax[i].set_xlabel(xlabels[i])
ax[i].set_ylabel(ylabels[i])
ax[i].set_title(titles[i])
for k in range(len(X)):
traj = X[k]
            if k == 1:
                roundplot(traj, ax[i], 'xr')
            else:
                roundplot(traj, ax[i], '.b')
axmax = max(axmax)
for i in range(2):
ax[i].set_xlim([-axmax, axmax])
ax[i].set_ylim([-axmax, axmax])
x, xp, y, yp = twissellipse(xtwiss[:, :, 0], emittx,
ytwiss[:, :, 0], emitty)
ax[0].plot(x, xp, '-g')
ax[1].plot(y, yp, '-g')
ax[1].yaxis.tick_right()
ax[1].yaxis.set_label_position('right')
ax2.plot([], [], '.b', label='Ensemble')
    ax2.plot([], [], '-g', label='Twiss ellipse')
ax2.plot([], [], 'xr', label='1 sigma particle')
ax2.axis('off')
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
leg = ax2.legend(fancybox=True, loc='center')
leg.get_frame().set_alpha(0.5)
return fig
def twissellipse(xtwiss, emittx, ytwiss, emitty):
def ellipse(emittance, beta, alpha, gamma):
        phi = linspace(0, 2*const.pi, 1000)
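        # semi-axes a, b follow from the eigenvalues of the Twiss matrix (using beta*gamma - alpha**2 = 1); PHI is the tilt angle of the ellipse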
a = sqrt(emittance/2*(beta+gamma+sqrt((beta+gamma)**2-4)))
b = sqrt(emittance/2*(beta+gamma-sqrt((beta+gamma)**2-4)))
if alpha > 0:
PHI = acos(+sqrt((beta-b/a)*emittance/(a**2-b**2)))
else:
PHI = acos(-sqrt((beta-b/a)*emittance/(a**2-b**2)))
pos = a*cos(phi)*cos(PHI)+b*sin(phi)*sin(PHI)
mom = -a*cos(phi)*sin(PHI)+b*sin(phi)*cos(PHI)
return pos, mom
x, xp = ellipse(emittx, xtwiss[0, 0], -xtwiss[0, 1], xtwiss[1, 1])
y, yp = ellipse(emitty, ytwiss[0, 0], -ytwiss[0, 1], ytwiss[1, 1])
return x, xp, y, yp
def plotramp(T, t, tt, tt2, tEgZ, tAI, tVgZ, E, EE, EEgZ, EAI, EVgZ, B, BB, loss, LL, volt,
VV, phases, freqs, Xemitequi, Yemitequi, Semitequi, bdurequis,
blenequis, V_HFs, Xemits, Yemits, Semits, NXemitequi, NYemitequi,
NXemits, NYemits, bdurs, blens, t3, FF):
def annotate(ax, xs, ys, ss, epsxs, epsys, hs, vs):
for x, y, s, h, v, epsx, epsy in zip(xs, ys, ss, hs, vs, epsxs, epsys):
ax.text(x+epsx, y+epsy, s, horizontalalignment=h, verticalalignment=v)
Nfigs = 10
legs = []
figs = [Figure() for i in range(Nfigs)]
s = [r'Injection''\n(',
r'Extraction''\n(',
r'Maximum energy''\n(',
r'Alternative extraction''\n(',
r'Zero Voltage''\n(',
r'Maximum Voltage''\n(']
s1 = [s[0]+SI(tt[0])+'s, '+SI(EE[0]) + 'eV)',
s[1]+SI(tt[1])+'s, '+SI(EE[1]) + 'eV)',
s[2]+SI(tt[2])+'s, '+SI(EE[2]) + 'eV)',
s[3]+SI(tt[3])+'s, '+SI(EE[3]) + 'eV)']
s2 = [s[0]+SI(tt[0])+'s, '+SI(BB[0]) + 'T)',
s[1]+SI(tt[1])+'s, '+SI(BB[1]) + 'T)',
s[2]+SI(tt[2])+'s, '+SI(BB[2]) + 'T)',
s[3]+SI(tt[3])+'s, '+SI(BB[3]) + 'T)']
s3 = [s[0]+SI(tt[0])+'s, '+SI(LL[0]) + 'eV)',
s[1]+SI(tt[1])+'s, '+SI(LL[1]) + 'eV)',
s[2]+SI(tt[2])+'s, '+SI(LL[2]) + 'eV)',
s[3]+SI(tt[3])+'s, '+SI(LL[3]) + 'eV)']
s4 = [s[0]+SI(tt2[0])+'s, '+SI(VV[0]) + 'V)',
s[1]+SI(tt2[1])+'s, '+SI(VV[1]) + 'V)',
s[2]+SI(tt2[2])+'s, '+SI(VV[2]) + 'V)',
s[3]+SI(tt2[3])+'s, '+SI(VV[3]) + 'V)',
s[4]+SI(tt2[4])+'s, '+SI(VV[4]) + 'V)',
s[5]+SI(tt2[5])+'s, '+SI(VV[5]) + 'V)']
xlab, xunit = 'Time', 's'
xlab2, xunit2 = 'Energy', 'eV'
# Energy
ax = figs[0].add_subplot(1, 1, 1)
_, mx = SId(nanmean(abs(t)))
epsT = array([1, -1, 0, 1])*.01*T/mx
ha = ['left', 'right', 'center', 'left']
va = ['top', 'bottom', 'bottom', 'bottom']
_, _, yprefix, my = plot(ax, t, E, '-r', 'Time', 's', 'Energy', 'eV', 'calculated curve')
ttn, EEn, _, _ = plot(ax, tt, EE, '+k', '', '', '', '', 'known points', yprefix=yprefix, my=my, setlim=False)
legs.append(ax.legend(fancybox=True, loc='center'))
epsY = array([-1, 0, 1, 0])*.03*max(EEn)
annotate(ax, ttn, EEn, s1, epsT, epsY, ha, va)
# Magnetic flux
ax = figs[1].add_subplot(1, 1, 1)
_, _, yprefix, my = plot(ax, t, B, '-r', 'Time', 's', 'Magnetic flux density', 'T', 'calculated curve')
ttn, BBn, _, _ = plot(ax, tt, BB, '+k', '', '', '', '', 'known points', yprefix=yprefix, my=my, setlim=False)
legs.append(ax.legend(fancybox=True, loc='lower center'))
epsY = array([-1, 0, 1, 0])*.03*max(BBn)
annotate(ax, ttn, BBn, s2, epsT, epsY, ha, va)
# Energy loss
ax = figs[2].add_subplot(1, 1, 1)
    _, _, yprefix, my = plot(ax, t, loss, '-r', 'Time', 's', 'Energy loss per turn', 'eV', '')
ttn, LLn, _, _ = plot(ax, tt, LL, '+k', '', '', '', '', 'known points', yprefix=yprefix, my=my, setlim=False)
epsY = array([-1, 0, 1, 0])*.03*max(LLn)
annotate(ax, ttn, LLn, s3, epsT, epsY, ha, va)
# Acceleration voltage
ax = figs[3].add_subplot(1, 1, 1)
epsT = array([1, -1, 1, 1, 1, -1])*.01*T/mx
ha = ['left', 'right', 'left', 'left', 'left', 'right']
va = ['top', 'bottom', 'bottom', 'bottom', 'bottom', 'bottom']
_, _, yprefix, my = plot(ax, t, volt, '-r', 'Time', 's', 'Required acceleration voltage', 'V', '')
ttn, VVn, _, _ = plot(ax, tt2, VV, '+k', '', '', '', '', 'known points', yprefix=yprefix, my=my, setlim=False)
epsY = array([-1, 0, 1, 0, -1, 1])*.02*max(VVn)
annotate(ax, ttn, VVn, s4, epsT, epsY, ha, va)
# Synchronous phase
ax = figs[4].add_subplot(1, 1, 1)
labs = ['Cavity @ {0:g} kV'.format(V_HF/1e3) for V_HF in V_HFs]
color = iter(cm.rainbow(linspace(0, 1, len(phases))))
[plot(ax, t, y, '-', 'Time', 's', 'Cavity Phase', r'2$\pi$',
l, col=next(color), setlim=False) for l, y in zip(labs, phases)]
legs.append(ax.legend(fancybox=True, loc='center right'))
# Synchrotron frequency
ax = figs[5].add_subplot(1, 1, 1)
colors = getcolors(len(freqs))
[plot(ax, tVgZ, y/1e3, '-', 'Time', 's', 'Synchrotron frequency', r'kHz',
l, col=c, setlim=False) for l, y, c in zip(labs, freqs, colors)]
[ax.plot(t3*1e3, x/1e3, '+k', markersize=24.0) for x in FF]
legs.append(ax.legend(fancybox=True, loc=1))
# Bunchlength and duration
GS = GridSpec(2, 2)
GS = [GS[:, 1], GS[0, 0], GS[1, 0]]
ax = [figs[6].add_subplot(gs) for gs in GS]
labs = [r'$\delta_{{E,equilibrium}}$, $V_{{max}} = {0:g}$ kV'.format(V_HF/1e3) for V_HF in V_HFs]
lss = ['-.' for x in range(len(bdurequis))]
labs += [r'$\delta_{{E,0}}={0:.1f}$ \textperthousand, $V_{{max}}={1:g}$ kV'.format(y[0], V_HF/1e3) for V_HF, y in product(V_HFs, Semits)]
lss += ['-' for x in range(len(bdurs))]
legplot(ax[0], lss, labs, loc=6)
Mplot(ax[1], tVgZ, bdurequis+bdurs, lss, '', '', 'Bunch duration', 's', '')
Mplot(ax[2], tVgZ, blenequis+blens, lss, xlab, xunit, 'Bunch length', 'm', '')
# Emittance
yunit = r'm $\pi$ rad'
# Radial Emittance
ax = [figs[7].add_subplot(2, 2, i) for i in range(1, 5)]
labs = ['Equilibrium']+[r'$\epsilon_0=$ {0:g} nm rad'.format(y[0]*1e9) for y in Xemits]
lss = ['-.']+['-' for x in range(len(Xemits))]
ylab, ylab2 = r'$\epsilon_x$', r'$\epsilon_x^*$'
Mplot(ax[0], tAI, [Xemitequi]+Xemits, lss, '', '', ylab, yunit, labs)
Mplot(ax[1], EAI, [Xemitequi]+Xemits, lss, '', '', '', '', '')
Mplot(ax[2], tAI, [NXemitequi]+NXemits, lss, xlab, xunit, ylab2, yunit, '')
Mplot(ax[3], EAI, [NXemitequi]+NXemits, lss, xlab2, xunit2, '', '', '')
legs.append(ax[0].legend(fancybox=True, loc=2))
for axi in ax:
        axi.axvline(tt[0]) # Injection
        axi.axvline(tt[1]) # Extraction
        axi.axvline(tt[3]) # 2nd Extraction
# Axial Emittance
ax = [figs[8].add_subplot(2, 2, i) for i in range(1, 5)]
labs = ['Limit']+[r'$\epsilon_0=$ {0:g} nm rad'.format(y[0]*1e9) for y in Yemits]
lss = ['-.']+['-' for x in range(len(Yemits))]
ylab, ylab2 = r'$\epsilon_y$', r'$\epsilon_y^*$'
Mplot(ax[0], tAI, [Yemitequi]+Yemits, lss, '', '', ylab, yunit, labs)
Mplot(ax[1], EAI, [Yemitequi]+Yemits, lss, '', '', '', '', '')
Mplot(ax[2], tAI, [NYemitequi]+NYemits, lss, xlab, xunit, ylab2, yunit, '')
Mplot(ax[3], EAI, [NYemitequi]+NYemits, lss, xlab2, xunit2, '', '', '')
legs.append(ax[0].legend(fancybox=True, loc=1))
# Longitudinal Emittance
ax = [figs[9].add_subplot(1, 2, i) for i in range(1, 3)]
labs = ['Equilibrium'] + [r'$\epsilon_0=$ {} \textperthousand'.format(y[0]) for y in Semits]
lss = ['-.']+['-' for x in range(len(Semits))]
ylab, yunit = r'$\delta_E=\frac{\sigma_E}{E_0}$', r'$\alpha$'
Mplot(ax[0], tAI, [Semitequi]+Semits, lss, xlab, xunit, ylab, yunit, labs, rescaleY=False)
Mplot(ax[1], EAI, [Semitequi]+Semits, lss, xlab2, xunit2, '', '', '', rescaleY=False)
legs.append(ax[0].legend(fancybox=True, loc=1))
[leg.get_frame().set_alpha(0.5) for leg in legs]
return figs
def pltsim_quadscan(k, sigx, sigy, sigx2, sigy2, sigxdf, sigydf, sigx2df, sigy2df, data=None):
    xlabel, xunit = 'Quadrupole strength', r'$m^{-2}$'
ylabel1, yunit1 = r'$\sigma_x^2$', r'$mm^2$'
ylabel2, yunit2 = r'$\sigma_y^2$', r'$mm^2$'
figs = [Figure()]
ax = [figs[0].add_subplot(1, 2, i) for i in range(1, 3)]
plot(ax[0], k, sigx*1e6, '-g', xlabel, xunit, ylabel1, yunit1, '', rescaleY=False)
plot(ax[0], k, sigxdf*1e6, '--g', xlabel, xunit, ylabel1, yunit1, 'neglect dispersion', rescaleY=False)
plot(ax[0], k, sigx2*1e6, '-r', xlabel, xunit, ylabel1, yunit1, '', rescaleY=False)
plot(ax[0], k, sigx2df*1e6, '--r', xlabel, xunit, ylabel1, yunit1, 'neglect dispersion', rescaleY=False)
    plot(ax[1], k, sigy*1e6, '-g', xlabel, xunit, ylabel2, yunit2, 'Full linear quadrupole model', rescaleY=False)
ax[1].plot([], [], '--g', label='neglect dispersion')
plot(ax[1], k, sigy2*1e6, '-r', xlabel, xunit, ylabel2, yunit2, 'Thin lens', rescaleY=False)
ax[1].plot([], [], '--r', label='neglect dispersion')
if data is not None:
plot(ax[0], data[0], data[1]*1e6, 'xb', xlabel, xunit, ylabel1, yunit1, 'Data', rescaleY=False)
plot(ax[1], data[0], data[2]*1e6, 'xb', xlabel, xunit, ylabel2, yunit2, '', rescaleY=False)
leg = ax[1].legend(fancybox=True, loc=0)
leg.get_frame().set_alpha(0.5)
return figs
def pltmeas_quadscan(figs, kx, sigx, ky, sigy, kfx, fitx, kfy, fity, strings, xerr=None, yerr=None):
ax = [figs[0].add_subplot(1, 2, i) for i in range(1, 3)]
xlabel, xunit = 'Quadrupole strength', r'$m^{-2}$'
ylabel1, yunit1 = r'$\sigma_x^2$', r'$mm^2$'
ylabel2, yunit2 = r'$\sigma_y^2$', r'$mm^2$'
if xerr is None:
plot(ax[0], kx, sigx*1e6, 'xb', xlabel, xunit, ylabel1, yunit1, 'Data', rescaleY=False)
else:
ax[0].errorbar(kx, sigx*1e6, yerr=xerr*1e6, marker='x', mfc='blue', ls='None', label='Data')
plot(ax[0], kfx, fitx*1e6, '-r', xlabel, xunit, ylabel1, yunit1, 'Fit', rescaleY=False)
if yerr is None:
plot(ax[1], ky, sigy*1e6, 'xb', xlabel, xunit, ylabel2, yunit2, '', rescaleY=False)
else:
ax[1].errorbar(ky, sigy*1e6, yerr=yerr*1e6, marker='x', mfc='blue', ls='None', label='Data')
    plot(ax[1], kfy, fity*1e6, '-r', xlabel, xunit, ylabel2, yunit2, '', rescaleY=False)
    ax[0].text(0.95, 0.95, strings[0], horizontalalignment='right', verticalalignment='top', transform=ax[0].transAxes)
    ax[1].text(0.05, 0.95, strings[1], horizontalalignment='left', verticalalignment='top', transform=ax[1].transAxes)
leg = ax[0].legend(fancybox=True, loc=3)
leg.get_frame().set_alpha(0.5)
[fig.canvas.draw() for fig in figs]
return
|
gpl-3.0
|
mrlb05/Nifty
|
tests/generate_response_curve.py
|
3
|
1827
|
import astropy.io.fits
import numpy as np
import matplotlib.pyplot as plt
# Create an empty 2D numpy array: 2040 wavelength bins by 4 columns (wavelength, filter transmission, calibrated flux, filter-weighted flux).
filtered = np.zeros((2040,4))
combined_extracted_1d_spectra_ = astropy.io.fits.open("xtfbrsnN20160705S0025.fits")
exptime = float(combined_extracted_1d_spectra_[0].header['EXPTIME'])
wstart = combined_extracted_1d_spectra_[1].header['CRVAL1']
wdelt = combined_extracted_1d_spectra_[1].header['CD1_1']
for i in range(len(filtered)):
filtered[i][0] = wstart + (i*wdelt)
print "Wavelength array: \n", filtered
f = open("hk.txt")
lines = f.readlines()
f.close()
lines = [lines[i].strip().split() for i in range(len(lines))]
for i in range(len(lines)):
lines[i][0] = float(lines[i][0])*10**4
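# for every wavelength bin, take the filter transmission of the nearest point in hk.txt (third column)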
for i in range(len(filtered)):
mindif = min(lines, key=lambda x:abs(x[0]-filtered[i][0]))
filtered[i][1] = mindif[2]
calibspec = np.load("calibspec.npy")
"""
effspec = np.load("effspec.npy")
print "Effspec:\n", effspec
calibspec = np.zeros((2040))
for i in range(len(effspec)):
if effspec[i] != 0:
calibspec[i] = combined_extracted_1d_spectra_[1].data[i]/exptime/effspec[i]
else:
calibspec[i] = 0
"""
filter_weighted_flux = []
temp_percentages = []
for i in range(len(calibspec)):
filtered[i][2] = calibspec[i]
filtered[i][3] = filtered[i][1] * filtered[i][2] * 0.01
filter_weighted_flux.append(filtered[i][3])
temp_percentages.append(filtered[i][1]*0.01)
print "\nIntegral of filter_weighted_flux:"
print np.trapz(filter_weighted_flux)
print "\nIntegral of percentages:"
print np.trapz(temp_percentages)
print "Integral of filter_weighted_flux divided by integral of percentages:"
print np.trapz(filter_weighted_flux)/np.trapz(temp_percentages)
plt.figure(1)
plt.plot(calibspec)
plt.plot(filter_weighted_flux, "r--")
plt.figure(2)
plt.plot(temp_percentages)
plt.show()
|
mit
|
klpn/seregmort
|
seregmort/__init__.py
|
1
|
18324
|
import requests
import json
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import cartopy.io.shapereader as shpreader
import cartopy.crs as ccrs
import statsmodels.api as sm
import pandas as pd
from pyjstat import pyjstat
from collections import OrderedDict
from sqlalchemy import create_engine
mpl.rcParams['axes.formatter.use_locale'] = True
mpl.style.use('ggplot')
morturl = 'http://api.scb.se/OV0104/v1/doris/sv/ssd/START/HS/HS0301/DodaOrsak'
popurl = 'http://api.scb.se/OV0104/v1/doris/sv/ssd/START/BE/BE0101/BE0101A/BefolkningNy'
g_units = pd.read_csv('naddata/g_units_names.csv', index_col = 'ref')
def scb_to_unit(scb):
"""Convert codes used by Statistics Sweden to units used by the NAD GIS files."""
scbform = 'SE/' + '{:0<9}'.format(scb)
if scbform in g_units.index:
return g_units.loc[scbform, 'G_unit']
else:
return 0
def metadata(url):
"""Fetch JSON metadata from url."""
req = requests.get(url)
return json.loads(req.content.decode('utf-8'), object_pairs_hook = OrderedDict)
def svreg_engine(dbfile):
return create_engine('sqlite:///' + dbfile)
def save_frame(ndeaths, dbfile):
"""Save a dataframe into a SQLite database."""
engine = svreg_engine(dbfile)
ndeaths['frame'].to_sql('regdeaths', engine, if_exists = 'append')
def save_dimension(ndeaths, filename):
"""Save returned metadata into a JSON file."""
with open(filename, 'w') as f:
json.dump(ndeaths['dimension'], f, ensure_ascii=False)
def is_county(region):
return len(region) == 2 and region != '00'
def is_municipality(region):
return len(region) == 4
def allages(ageformat = 'mort'):
"""Returns the age groups used by Statistics Sweden."""
if ageformat == 'mort':
startint = '0'
startages = [1] + list(range(5, 90, 5))
endages = [4] + list(range(9, 94, 5))
endint = '90+'
elif ageformat == 'pop':
startint = '-4'
startages = list(range(5, 100, 5))
endages = list(range(9, 104, 5))
endint = '100+'
midints = ['{0}-{1}'.format(s, e) for s, e in zip(startages, endages)]
return [startint] + midints + [endint]
def ageintmerge():
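    """Return a table pairing mortality-format age groups with the corresponding population-format age groups."""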
return pd.DataFrame({'mortAlder': allages() + [allages()[-1]]*2,
'Alder': [allages('pop')[0]] + allages('pop')})
def agesplitter(age):
if '-' in age:
return age.split('-')
else:
return [age]
def ageslice(startage, endage, mean):
ages = allages()
startind = ages.index(startage)
endind = ages.index(endage)
if mean:
meanstr = " medel över åldrar"
else:
meanstr = ""
if startage == endage:
alias = startage.replace('-', '\u2013') + meanstr
else:
alias = agesplitter(startage)[0] + '\u2013' + agesplitter(endage)[-1] + meanstr
agelist = ages[startind:endind+1]
return {'agelist': agelist, 'alias': alias}
def causealias(cause, dim):
if cause == 'POP':
return dim['ContentsCode']['category']['label']['BE0101N1'].lower()
else:
return dim['Dodsorsak']['category']['label'][cause]
def allregions(level, metadict):
"""Return all regions at county or municipality level."""
regvalues = metadict['variables'][0]['values']
if level == 'county':
return list(filter(is_county, regvalues))
elif level == 'municipality':
return list(filter(is_municipality, regvalues))
def unchanged_county(region):
return region[0:2] not in ['11', '12', '14', '15', '16']
def unchanged_regions(level, metadict):
regvalues = allregions(level, metadict)
return list(filter(unchanged_county, regvalues))
def munis_incounty(county, metadict):
"""Return all municipalities in the county given."""
regvalues = metadict['variables'][0]['values']
return [region for region in regvalues
if is_municipality(region) and region.startswith(county)]
def yearrange(start = 1969, end = 1996):
return [str(year) for year in range(start, end+1)]
def mortreqjson(regvalues, causevalues, agevalues = allages(),
sexvalues = ['1', '2'], yearvalues = yearrange()):
"""Prepare a JSON request to return number of deaths."""
if '-' in causevalues[0]:
causefilter = 'agg:DödsorsakKapitel'
else:
causefilter = 'item'
if is_county(regvalues[0]):
regfilter = 'vs:RegionLän'
elif is_municipality(regvalues[0]):
regfilter = 'vs:RegionKommun95'
return {'response': {'format': 'json-stat'},
'query': [{'selection': {'filter': regfilter, 'values': regvalues},
'code': 'Region'},
{'selection': {'filter': causefilter, 'values': causevalues},
'code': 'Dodsorsak'},
{'selection': {'filter': 'item', 'values': agevalues}, 'code': 'Alder'},
{'selection': {'filter': 'item', 'values': sexvalues}, 'code': 'Kon'},
{'selection': {'filter': 'item', 'values': yearvalues}, 'code': 'Tid'}]}
def popreqjson(regvalues, agevalues = allages('pop'),
sexvalues = ['1', '2'], yearvalues = yearrange()):
"""Prepare a JSON request to return population size."""
if is_county(regvalues[0]):
regfilter = 'vs:RegionLän07'
elif is_municipality(regvalues[0]):
regfilter = 'vs:RegionKommun07'
return {'response': {'format': 'json-stat'},
'query': [{'selection': {'filter': regfilter, 'values': regvalues},
'code': 'Region'},
{'selection': {'filter': 'agg:Ålder5år', 'values': agevalues}, 'code': 'Alder'},
{'selection': {'filter': 'item', 'values': sexvalues}, 'code': 'Kon'},
{'selection': {'filter': 'item', 'values': ['BE0101N1']}, 'code': 'ContentsCode'},
{'selection': {'filter': 'item', 'values': yearvalues}, 'code': 'Tid'}]}
def ndeaths(regvalues, causevalues, agevalues = allages(),
sexvalues = ['1', '2'], yearvalues = yearrange()):
"""Send a JSON request to return number of deaths."""
qjson = mortreqjson(regvalues, causevalues, agevalues, sexvalues, yearvalues)
req = requests.post(morturl, json = qjson)
req.raise_for_status()
respstr = req.content.decode('utf-8')
respjson = json.loads(respstr, object_pairs_hook = OrderedDict)
return {'dimension': respjson['dataset']['dimension'],
'frame': pyjstat.from_json_stat(respjson, naming = 'id')[0]}
def npop(regvalues, agevalues = allages('pop'),
sexvalues = ['1', '2'], yearvalues = yearrange()):
"""Send a JSON request to return population size."""
qjson = popreqjson(regvalues, agevalues, sexvalues, yearvalues)
req = requests.post(popurl, json = qjson)
req.raise_for_status()
respstr = req.content.decode('utf-8')
respjson = json.loads(respstr, object_pairs_hook = OrderedDict)
popframe = pyjstat.from_json_stat(respjson, naming = 'id')[0]
popmerged = pd.merge(ageintmerge(), popframe, on = 'Alder')
return {'dimension': respjson['dataset']['dimension'],
'frame': popmerged}
def smoother(col, index):
"""Smooth time trends."""
return sm.nonparametric.lowess(col, index, frac = 0.4)
def prop_timegrp(numframe, numcause, denomframe, denomcause, sex, region, agelist, years):
numframe_sub = numframe[(numframe.Kon == sex) & (numframe.Dodsorsak == numcause)
& (numframe.Region == region) & (numframe.Alder.isin(agelist))
& (numframe.Tid.isin(years))].groupby(['Tid'])
if denomcause == 'POP':
denomframe_sub = denomframe[(denomframe.Kon == sex) &
(denomframe.Region == region) & (denomframe.Alder.isin(agelist))
& (denomframe.Tid.isin(years))].groupby(['Tid'])
else:
denomframe_sub = denomframe[(denomframe.Kon == sex) &
(denomframe.Dodsorsak == denomcause) &
(denomframe.Region == region) & (denomframe.Alder.isin(agelist))
& (denomframe.Tid.isin(years))].groupby(['Tid'])
return numframe_sub.value.sum() / denomframe_sub.value.sum()
def propplotyrs(numframe, denomframe, numdim, denomdim, numcause, denomcause,
region, startage, endage, years = yearrange(), sexes = ['2', '1'], mean = False):
"""Plot a time trend for the number of deaths of one cause relative to another."""
plt.close()
numcausealias = causealias(numcause, numdim)
denomcausealias = causealias(denomcause, denomdim)
regalias = numdim['Region']['category']['label'][region].replace(region, '').lstrip()
ages = ageslice(startage, endage, mean)
agealias = ages['alias']
agelist = ages['agelist']
yrints = list(map(int, years))
for sex in sexes:
sexalias = numdim['Kon']['category']['label'][sex]
if mean:
ageprops = []
for age in agelist:
ageprops.append(prop_timegrp(numframe, numcause,
denomframe, denomcause, sex, region, [age], years))
aptransp = np.transpose(ageprops)
prop = list(map(np.mean, aptransp))
else:
prop = prop_timegrp(numframe, numcause, denomframe, denomcause,
sex, region, agelist, years)
plt.plot(yrints, prop, label = sexalias)
sex_smo = smoother(prop, yrints)
plt.plot(sex_smo[:, 0], sex_smo[:, 1], label = sexalias + ' jämnad')
plt.legend(framealpha = 0.5)
plt.xlim(yrints[0], yrints[-1])
plt.ylim(ymin = 0)
plt.title('Döda {numcausealias}/{denomcausealias}\n{agealias} {regalias}'
.format(**locals()))
def prop_reggrp(numframe, numcause, denomframe, denomcause, sex, agelist, mean = False):
if mean:
agepropdicts = []
ageprops = []
for age in agelist:
agepropdicts.append(propdiv_reggrp(numframe, numcause,
denomframe, denomcause, sex, [age]))
ageprops.append(agepropdicts[-1]['prop'])
regvalues = agepropdicts[0]['regvalues']
aptransp = np.transpose(ageprops)
prop = list(map(np.mean, aptransp))
return {'prop': prop, 'regvalues': regvalues}
else:
return propdiv_reggrp(numframe, numcause, denomframe, denomcause, sex, agelist)
def propdiv_reggrp(numframe, numcause, denomframe, denomcause, sex, agelist):
numframe_sub = numframe[(numframe.Kon == sex) &
(numframe.Dodsorsak == numcause)
& (numframe.Alder.isin(agelist))].groupby(['Region'])
if denomcause == 'POP':
denomframe_sub = denomframe[(denomframe.Kon == sex) &
(denomframe.Alder.isin(agelist))].groupby(['Region'])
else:
denomframe_sub = denomframe[(denomframe.Kon == sex) &
(denomframe.Dodsorsak == denomcause) &
(denomframe.Alder.isin(agelist))].groupby(['Region'])
return {'prop': numframe_sub.value.sum() / denomframe_sub.value.sum(),
'regvalues': list(numframe_sub.Region.all())}
def propscatsexes(numframe, denomframe, numdim, denomdim, numcause, denomcause,
startage, endage, mean = False, **kwargs):
"""Plot the number of deaths of one cause relative to another for females vs males."""
plt.close()
numcausealias = causealias(numcause, numdim)
denomcausealias = causealias(denomcause, denomdim)
ages = ageslice(startage, endage, mean)
agealias = ages['alias']
agelist = ages['agelist']
startyear = min(numframe.Tid)
endyear = max(numframe.Tid)
sexframes = dict()
for sex in ['2', '1']:
sexframes[sex] = dict()
sexframes[sex]['alias'] = numdim['Kon']['category']['label'][sex]
propdict = prop_reggrp(numframe, numcause, denomframe, denomcause, sex, agelist, mean)
sexframes[sex]['prop'] = propdict['prop']
sexframes[sex]['regvalues'] = propdict['regvalues']
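    # scatter regional proportions: women (Kon '2') on the x-axis vs men (Kon '1') on the y-axis, each point annotated with its region code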
plt.scatter(sexframes['2']['prop'], sexframes['1']['prop'])
for i, code in enumerate(sexframes['2']['regvalues']):
plt.annotate(code, (sexframes['2']['prop'][i], sexframes['1']['prop'][i]))
plt.xlabel(sexframes['2']['alias'])
plt.ylabel(sexframes['1']['alias'])
axmin = 0.95
axmax = 1.05
plt.xlim(min(sexframes['2']['prop'])*axmin, max(sexframes['2']['prop'])*axmax)
plt.ylim(min(sexframes['1']['prop'])*axmin, max(sexframes['1']['prop'])*axmax)
plt.title('Döda {numcausealias}/{denomcausealias}\n'
'{agealias} {startyear}\u2013{endyear}'.format(**locals()))
def perc_round(value):
return str(np.round(value, 4)).replace('.', ',')
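# threep/fourp/fivep define colour scales from tertile, quartile and quintile breakpoints of the plotted proportion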
def threep(prop):
return [{'col': 'lightsalmon', 'value': np.nanpercentile(prop, 1/3*100)},
{'col': 'tomato', 'value': np.nanpercentile(prop, 2/3*100)},
{'col': 'red', 'value': np.nanpercentile(prop, 100)}]
def fourp(prop):
return [{'col': 'lightyellow', 'value': np.nanpercentile(prop, 1/4*100)},
{'col': 'yellow', 'value': np.nanpercentile(prop, 2/4*100)},
{'col': 'tomato', 'value': np.nanpercentile(prop, 3/4*100)},
{'col': 'red', 'value': np.nanpercentile(prop, 100)}]
def fivep(prop):
return [{'col': 'lightyellow', 'value': np.nanpercentile(prop, 1/5*100)},
{'col': 'yellow', 'value': np.nanpercentile(prop, 2/5*100)},
{'col': 'orange', 'value': np.nanpercentile(prop, 3/5*100)},
{'col': 'tomato', 'value': np.nanpercentile(prop, 4/5*100)},
{'col': 'red', 'value': np.nanpercentile(prop, 100)}]
def propmap(numframe, denomframe, numdim, denomdim, numcause, denomcause,
startage, endage, sex, shapefname, percfunc = threep, mean = False):
"""Draw a map with percentiles of deaths of one cause relative to another."""
plt.close()
ages = ageslice(startage, endage, mean)
agealias = ages['alias']
agelist = ages['agelist']
sexalias = numdim['Kon']['category']['label'][sex]
numcausealias = causealias(numcause, numdim)
denomcausealias = causealias(denomcause, denomdim)
startyear = min(numframe.Tid)
endyear = max(numframe.Tid)
region_shp = shpreader.Reader(shapefname)
propdict = prop_reggrp(numframe, numcause, denomframe, denomcause, sex, agelist, mean)
prop = propdict['prop']
regvalues = propdict['regvalues']
units = list(map(scb_to_unit, regvalues))
regdict = dict(zip(units, regvalues))
percentiles = percfunc(prop)
ax = plt.axes(projection = ccrs.TransverseMercator())
boundlist = []
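    # colour each region polygon by the percentile bin of its proportion and collect bounds to set the map extent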
for region_rec in region_shp.records():
regcode = region_rec.attributes['G_UNIT']
regend = region_rec.attributes['GET_END_YE']
if (regcode in regdict.keys() and regend > 1995):
i = regvalues.index(regdict[regcode])
boundlist.append(region_rec.bounds)
for percentile in percentiles:
if prop[i] <= percentile['value']:
facecolor = percentile['col']
break
ax.add_geometries([region_rec.geometry], ccrs.TransverseMercator(),
edgecolor = 'black', facecolor = facecolor)
xmin = min([bound[0] for bound in boundlist])
xmax = max([bound[2] for bound in boundlist])
ymin = min([bound[1] for bound in boundlist])
ymax = max([bound[3] for bound in boundlist])
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
percpatches = []
perclabels = []
for i, percentile in enumerate(percentiles):
percpatch = mpatches.Rectangle((0, 0), 1, 1,
facecolor = percentile['col'])
percpatches.append(percpatch)
if i == 0:
perclabel = str('\u2265' + perc_round(min(prop)) +
'\n\u2264' + perc_round(percentile['value']))
else:
perclabel = '\u2264' + perc_round(percentile['value'])
perclabels.append(perclabel)
plt.legend(percpatches, perclabels, loc = 'upper left',
framealpha = 0.75, bbox_to_anchor=(1,1))
plt.title('Döda {numcausealias}/{denomcausealias}\n'
'{sexalias} {agealias} {startyear}\u2013{endyear}'.format(**locals()))
plt.show()
def catot_yrsdict(region, cause):
"""Return a dictionary for deaths due to a cause and all deaths over time."""
cadeaths = ndeaths([region], [cause])
totdeaths = ndeaths([region], ['TOT'])
return {'numframe': cadeaths['frame'], 'denomframe': totdeaths['frame'],
'numdim': cadeaths['dimension'], 'denomdim': totdeaths['dimension'],
'numcause': cause, 'denomcause': 'TOT', 'region': region}
def capop_yrsdict(region, cause):
"""Return a dictionary for deaths due to a cause and population over time."""
cadeaths = ndeaths([region], [cause])
pop = npop([region])
return {'numframe': cadeaths['frame'], 'denomframe': pop['frame'],
'numdim': cadeaths['dimension'], 'denomdim': pop['dimension'],
'numcause': cause, 'denomcause': 'POP', 'region': region}
def catot_mapdict(regvalues, cause, startyear, endyear,
shapefname = 'naddata/2504/__pgsql2shp2504_tmp_table.shp'):
"""Return a dictionary for deaths due to a cause and all deaths for a set of regions."""
cadeaths = ndeaths(regvalues, [cause], yearvalues = yearrange(startyear, endyear))
totdeaths = ndeaths(regvalues, ['TOT'], yearvalues = yearrange(startyear, endyear))
return {'numframe': cadeaths['frame'], 'denomframe': totdeaths['frame'],
'numdim': cadeaths['dimension'], 'denomdim': totdeaths['dimension'],
'numcause': cause, 'denomcause': 'TOT', 'shapefname': shapefname}
def capop_mapdict(regvalues, cause, startyear, endyear,
shapefname = 'naddata/2504/__pgsql2shp2504_tmp_table.shp'):
"""Return a dictionary for deaths due to a cause and population for a set of regions."""
cadeaths = ndeaths(regvalues, [cause], yearvalues = yearrange(startyear, endyear))
pop = npop(regvalues, yearvalues = yearrange(startyear, endyear))
return {'numframe': cadeaths['frame'], 'denomframe': pop['frame'],
'numdim': cadeaths['dimension'], 'denomdim': pop['dimension'],
'numcause': cause, 'denomcause': 'POP', 'shapefname': shapefname}
def reglabels(pardict):
"""Return region labels."""
return pardict['numdim']['Region']['category']['label']
|
isc
|
epfl-lts2/pygsp
|
pygsp/graphs/nngraphs/twomoons.py
|
1
|
3699
|
# -*- coding: utf-8 -*-
import numpy as np
from pygsp import utils
from pygsp.graphs import NNGraph # prevent circular import in Python < 3.5
class TwoMoons(NNGraph):
r"""Two Moons (NN-graph).
Parameters
----------
moontype : 'standard' or 'synthesized'
        You have the freedom to choose if you want to create a standard
        two_moons graph or a synthesized one (default is 'standard').
        'standard' : Create a two_moons graph from a stored point cloud.
        'synthesized' : Create a synthesized two_moons graph.
sigmag : float
Variance of the distance kernel (default = 0.05)
dim : int
The dimensionality of the points (default = 2).
Only valid for moontype == 'standard'.
N : int
        Number of vertices (default = 400)
Only valid for moontype == 'synthesized'.
sigmad : float
Variance of the data (do not set it too high or you won't see anything)
        (default = 0.07)
Only valid for moontype == 'synthesized'.
distance : float
Distance between the two moons (default = 0.5)
Only valid for moontype == 'synthesized'.
seed : int
Seed for the random number generator (for reproducible graphs).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.TwoMoons()
>>> fig, axes = plt.subplots(1, 2)
>>> _ = axes[0].spy(G.W, markersize=0.5)
>>> _ = G.plot(edges=True, ax=axes[1])
"""
def _create_arc_moon(self, N, sigmad, distance, number, seed):
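        """Return N points along one half-moon arc (selected by number), perturbed by random noise of scale sigmad."""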
rs = np.random.RandomState(seed)
phi = rs.rand(N, 1) * np.pi
r = 1
rb = sigmad * rs.normal(size=(N, 1))
ab = rs.rand(N, 1) * 2 * np.pi
b = rb * np.exp(1j * ab)
bx = np.real(b)
by = np.imag(b)
if number == 1:
moonx = np.cos(phi) * r + bx + 0.5
moony = -np.sin(phi) * r + by - (distance - 1)/2.
elif number == 2:
moonx = np.cos(phi) * r + bx - 0.5
moony = np.sin(phi) * r + by + (distance - 1)/2.
return np.concatenate((moonx, moony), axis=1)
def __init__(self, moontype='standard', dim=2, sigmag=0.05,
N=400, sigmad=0.07, distance=0.5, seed=None, **kwargs):
self.moontype = moontype
self.dim = dim
self.sigmag = sigmag
self.sigmad = sigmad
self.distance = distance
self.seed = seed
if moontype == 'standard':
N1, N2 = 1000, 1000
data = utils.loadmat('pointclouds/two_moons')
Xin = data['features'][:dim].T
elif moontype == 'synthesized':
N1 = N // 2
N2 = N - N1
coords1 = self._create_arc_moon(N1, sigmad, distance, 1, seed)
coords2 = self._create_arc_moon(N2, sigmad, distance, 2, seed)
Xin = np.concatenate((coords1, coords2))
else:
raise ValueError('Unknown moontype {}'.format(moontype))
self.labels = np.concatenate((np.zeros(N1), np.ones(N2)))
plotting = {
'vertex_size': 30,
}
super(TwoMoons, self).__init__(Xin=Xin, sigma=sigmag, k=5,
center=False, rescale=False,
plotting=plotting, **kwargs)
def _get_extra_repr(self):
attrs = {'moontype': self.moontype,
'dim': self.dim,
'sigmag': '{:.2f}'.format(self.sigmag),
'sigmad': '{:.2f}'.format(self.sigmad),
'distance': '{:.2f}'.format(self.distance),
'seed': self.seed}
attrs.update(super(TwoMoons, self)._get_extra_repr())
return attrs
|
bsd-3-clause
|
dilawar/moose-full
|
moose-examples/tutorials/ChemicalOscillators/slowFbOsc.py
|
4
|
3339
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pylab
import numpy
import sys
def main():
"""
    This example illustrates loading and running a kinetic model
    for a delayed negative-feedback oscillator, defined in kkit format.
The model is one by Boris N. Kholodenko from
Eur J Biochem. (2000) 267(6):1583-8
This model has a high-gain MAPK stage,
    whose effects are visible when one looks at the traces from
successive stages in the plots. The upstream pools have small
early peaks, and the downstream pools have large delayed ones.
The negative feedback step is mediated by a simple binding reaction
of the end-product of oscillation with an upstream activator.
We use the gsl solver here. The model already
defines some plots and sets the runtime to 4000 seconds.
The model does not really play nicely with the GSSA solver, since it
involves some really tiny amounts of the MAPKKK.
Things to do with the model:
- Look at model once it is loaded in::
moose.le( '/model' )
moose.showfields( '/model/kinetics/MAPK/MAPK' )
- Behold the amplification properties of the cascade.
Could do this by blocking the feedback step and giving a
small pulse input.
- Suggest which parameters you would alter to change
the period of the oscillator:
- Concs of various molecules, for example::
ras_MAPKKKK = moose.element( '/model/kinetics/MAPK/Ras_dash_MKKKK' )
moose.showfields( ras_MAPKKKK )
ras_MAPKKKK.concInit = 1e-5
- Feedback reaction rates
- Rates of all the enzymes::
for i in moose.wildcardFind( '/##[ISA=EnzBase]'):
i.kcat *= 10.0
"""
solver = "gsl"
mfile = '../../genesis/Kholodenko.g'
runtime = 5000.0
if ( len( sys.argv ) >= 2 ):
solver = sys.argv[1]
modelId = moose.loadModel( mfile, 'model', solver )
dt = moose.element( '/clock' ).tickDt[18]
moose.reinit()
moose.start( runtime )
# Display all plots.
img = mpimg.imread( 'Kholodenko_tut.png' )
fig = plt.figure( figsize=( 12, 10 ) )
png = fig.add_subplot( 211 )
imgplot = plt.imshow( img )
ax = fig.add_subplot( 212 )
x = moose.wildcardFind( '/model/#graphs/conc#/#' )
t = numpy.arange( 0, x[0].vector.size, 1 ) * dt
ax.plot( t, x[0].vector * 100, 'b-', label='Ras-MKKK * 100' )
ax.plot( t, x[1].vector, 'y-', label='MKKK-P' )
ax.plot( t, x[2].vector, 'm-', label='MKK-PP' )
ax.plot( t, x[3].vector, 'r-', label='MAPK-PP' )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Time (seconds)' )
pylab.legend()
pylab.show()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
gpl-2.0
|
Windy-Ground/scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
225
|
5903
|
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
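        # Brier score: mean squared difference between the predicted probability and the observed 0/1 outcome (lower is better)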
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
|
bsd-3-clause
|
tchakravarty/PyMurphy
|
Archive/pmtk3-master/python/demos/ch01/knnClassifyDemo.py
|
7
|
3208
|
#!/usr/bin/env python
from utils import DATA_DIR
import os
import numpy as np
import matplotlib.pylab as pl
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.cross_validation import cross_val_score
def load_data():
"""Since the knnClassify3c.mat is the matlab v7.3 or later file
we have to load data from txt"""
train_file = os.path.join(DATA_DIR, 'knnClassify3cTrain.txt')
test_file = os.path.join(DATA_DIR, 'knnClassify3cTest.txt')
train = np.loadtxt(train_file,
dtype=[('x_train', ('f8', 2)),
('y_train', ('f8', 1))])
test = np.loadtxt(test_file,
dtype=[('x_test', ('f8', 2)),
('y_test', ('f8', 1))])
return train['x_train'], train['y_train'], test['x_test'], test['y_test']
x_train, y_train, x_test, y_test = load_data()
#plot train fig
pl.figure()
y_unique = np.unique(y_train)
markers = '*x+'
colors = 'bgr'
for i in range(len(y_unique)):
pl.scatter(x_train[y_train == y_unique[i], 0],
x_train[y_train == y_unique[i], 1],
marker=markers[i],
c=colors[i])
pl.savefig('knnClassifyDemo_1.png')
#plot test fig
pl.figure()
for i in range(len(y_unique)):
pl.scatter(x_test[y_test == y_unique[i], 0],
x_test[y_test == y_unique[i], 1],
marker=markers[i],
c=colors[i])
pl.savefig('knnClassifyDemo_2.png')
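# evaluate the classifier on a 200x200 grid spanning the test data to visualise the decision regions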
x = np.linspace(np.min(x_test[:, 0]), np.max(x_test[:, 0]), 200)
y = np.linspace(np.min(x_test[:, 1]), np.max(x_test[:, 1]), 200)
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
# use the knn model to predict
for k in [1, 5, 10]:
knn = KNN(n_neighbors=k)
knn.fit(x_train, y_train)
pl.figure()
y_predicted = knn.predict(xy)
pl.pcolormesh(y_predicted.reshape(200, 200))
pl.title('k=%s' % (k))
pl.savefig('knnClassifyDemo_k%s.png' % (k))
#plot train err and test err with different k
ks = [1, 5, 10, 20, 50, 100, 120]
train_errs = []
test_errs = []
for k in ks:
knn = KNN(n_neighbors=k)
knn.fit(x_train, y_train)
train_errs.append(1 - knn.score(x_train, y_train))
test_errs.append(1 - knn.score(x_test, y_test))
pl.figure()
pl.plot(ks, train_errs, 'bs:', label='train')
pl.plot(ks, test_errs, 'rx-', label='test')
pl.legend()
pl.xlabel('k')
pl.ylabel('misclassification rate')
pl.savefig('knnClassifyDemo_4.png')
#cross_validate
scores = []
for k in ks:
knn = KNN(n_neighbors=k)
score = cross_val_score(knn, x_train, y_train, cv=5)
scores.append(1 - score.mean())
pl.figure()
pl.plot(ks, scores, 'ko-')
min_k = ks[np.argmin(scores)]
pl.plot([min_k, min_k], [0, 1.0], 'b-')
pl.xlabel('k')
pl.ylabel('misclassification rate')
pl.title('5-fold cross validation, n-train = 200')
# draw heat maps showing the predicted probability of each class
knn = KNN(n_neighbors=10)
knn.fit(x_train, y_train)
xy_predic = knn.predict_proba(xy)
levels = np.arange(0, 1.01, 0.1)
for i in range(3):
pl.figure()
pl.contourf(xy_predic[:, i].ravel().reshape(200, 200), levels)
pl.colorbar()
pl.title('p(y=%s | data, k=10)' % (i))
pl.savefig('knnClassifyDemo_hotmap_%s.png' % (i))
pl.show()
|
mit
|
brenthuisman/phd_tools
|
relunc.frombatch.py
|
1
|
3640
|
#!/usr/bin/env python
'''
generate relunc plot directly from batch.
'''
import sys,copy,glob2,image,tle,plot,numpy as np,matplotlib.pyplot as plt,matplotlib as mpl
from numpy import sqrt
if len(sys.argv) != 2:
print "No input file names given, assume analog.mhd"
indir = "."
infile = "analog.mhd"
else:
indir = "."
infile = str(sys.argv[-1])
sources = glob2.glob(indir+"/**/"+infile)
if len(sources) < 2:
print "None or one file found, exiting..."
sys.exit()
nr = len(sources)
print len(sources), "files found:", sources[:3], '...', sources[-3:]
######################################################################################################## TODO: divide by N-1, because var = sigma^2/(N-1)
# sigma^2 = sum_of_squares/N - mean^2
# var = sum-of-squares/# - sq(sum/#)
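# A minimal sketch (not used below) of the same per-bin estimate, assuming n >= 2
# files and `total`, `total_sq` being the running sum and sum of squares of one bin:
def _rel_uncertainty(total, total_sq, n):
    mean = total / float(n)
    var = (total_sq / float(n) - mean**2) / (n - 1)
    return sqrt(var) / mean if mean > 0 else 0.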
### Load up
maskspect = image.image("mask-spect1-8/finalmask.mhd")
maskbox2 = image.image("mask-box2/phantom_Parodi_TNS_2005_52_3_modified.maskfile2.mhd")
maskbox8 = image.image("mask-box8/phantom_Parodi_TNS_2005_52_3_modified.maskfile8.mhd")
mask90pc = image.image("mask90pc/mean.mhd") #is simply analog1B mean
mask90pc.to90pcmask()
# box2ims = []
box2_sum = [0]*250
box2_sumsq = [0]*250
for filename in sources:
im = image.image(filename)
im.applymask(mask90pc,maskspect,maskbox2)
spect = im.get1dlist([1,1,1,0])
box2_sum = [x+y for x,y in zip(box2_sum, spect)]
box2_sumsq = [x+(y**2) for x,y in zip(box2_sumsq, spect)]
# box8ims = []
box8_sum = [0]*250
box8_sumsq = [0]*250
for filename in sources:
# box8ims.append(image.image(filename))
# box8ims[-1].applymask([mask90pc,maskspect,maskbox8])
# spect=box8ims[-1].get1dlist([1,1,1,0])
im = image.image(filename)
im.applymask(mask90pc,maskspect,maskbox8)
spect = im.get1dlist([1,1,1,0])
box8_sum = [x+y for x,y in zip(box8_sum, spect)]
box8_sumsq = [x+(y**2) for x,y in zip(box8_sumsq, spect)]
# print "box8_sum",box8_sum
# print "box8_sumsq",box8_sumsq
### get var
# var = sum-of-squares/# - (sum/#)^2 / N-1
relunc2 = [0]*250
relunc8 = [0]*250
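# per energy bin: mean = sum/N, variance = (sumsq/N - mean^2)/(N-1), relative uncertainty = sqrt(var)/mean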
for E in range(250):
if box2_sum[E] > 0 and box2_sum[E] > 0:
sig2 = box2_sum[E] / nr
var2 = ( box2_sumsq[E]/nr-(sig2)**2 ) / (nr-1)
# relunc2[E] = sqrt(abs(var2))/sig2
relunc2[E] = sqrt(var2)/sig2
if box8_sum[E] > 0 and box8_sum[E] > 0:
sig8 = box8_sum[E] / nr
var8 = ( box8_sumsq[E]/nr-(sig8)**2 ) / (nr-1)
# relunc8[E] = sqrt(abs(var8))/sig8
relunc8[E] = sqrt(var8)/sig8
#modified from tle.plt
def plot1d(ax1,dataset):
colors = plot.colors
color_index=0
#asume voxels are 2mm
x_axis = np.linspace(0,10,250)
key = 1e4
ax1.step(x_axis,[x*100. for x in dataset], label=plot.sn_mag(key)+' primaries', color=colors[color_index])
color_index+=1
ax1.semilogy()
ax1.autoscale(axis='x',tight=True)
#ax1.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f%%'))
#PG falloff line
spectcutoff = [1,8]
ax1.set_xlim(spectcutoff)
plot.texax(ax1)
f, (tl,tr) = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
plot1d(tl,relunc2)
plot1d(tr,relunc8)
# plot1d(tl,box2_sum)
# plot1d(tr,box8_sum)
tl.yaxis.set_label_position("left")
tr.yaxis.set_label_position("left")
tl.set_title("Medium 2 (Bone)")#\nMedian gain: "+plot.sn(lmed2))
tr.set_title("Medium 8 (Muscle)")#\nMedian gain: "+plot.sn(lmed8))
# tl.set_ylim([1e-1,1.1e2])
# tr.set_ylim([1e-1,1.1e2])
tl.set_ylabel('Relative Uncertainty [\%]')
tl.set_xlabel('PG energy [MeV]')
tl.xaxis.set_label_coords(1.1,-0.1)
lgd = tr.legend(loc='upper right', bbox_to_anchor=(1.7, 1.),frameon=False)
[label.set_linewidth(1) for label in lgd.get_lines()]
f.savefig('relunc-boxes.pdf', bbox_inches='tight')
plt.close('all')
|
lgpl-3.0
|
lukas/scikit-class
|
examples/keras-gan/gan.py
|
2
|
6061
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('agg') # noqa
import numpy as np
import sys
import matplotlib.pyplot as plt
import wandb
import tensorflow as tf
run = wandb.init()
config = wandb.config
class GAN():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
optimizer = tf.keras.optimizers.Adam(0.0002, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
z = tf.keras.layers.Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines validity
validity = self.discriminator(img)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = tf.keras.models.Model(z, validity)
wandb.run.summary['graph'] = wandb.Graph.from_keras(self.combined)
wandb.run._user_accessed_summary = False
self.combined.summary()
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_generator(self):
"""The model that generates imagery from a latent vector"""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(256, input_dim=self.latent_dim))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
model.add(tf.keras.layers.Dense(1024))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.BatchNormalization(momentum=0.8))
model.add(tf.keras.layers.Dense(
np.prod(self.img_shape), activation='tanh'))
model.add(tf.keras.layers.Reshape(self.img_shape))
model.summary()
noise = tf.keras.layers.Input(shape=(self.latent_dim,))
img = model(noise)
return tf.keras.models.Model(noise, img)
def build_discriminator(self):
"""The model that classifies images as real or fake"""
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=self.img_shape))
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.Dense(256))
model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
img = tf.keras.layers.Input(shape=self.img_shape)
validity = model(img)
return tf.keras.models.Model(img, validity)
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
(X_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
# Rescale -1 to 1
X_train = X_train / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Generate a batch of new images
gen_imgs = self.generator.predict(noise)
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the generator (to have the discriminator label samples as valid)
g_loss = self.combined.train_on_batch(noise, valid)
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
(epoch, d_loss[0], 100*d_loss[1], g_loss))
wandb.log({
"discriminator_acc": 100*d_loss[1],
"discriminator_loss": d_loss[0],
"epoch": epoch,
"generator_loss": g_loss,
"examples": [wandb.Image(img) for img in gen_imgs]
})
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
axs[i, j].axis('off')
cnt += 1
fig.savefig("latest_example.png") # % epoch)
plt.close()
if __name__ == '__main__':
gan = GAN()
gan.train(epochs=300000, batch_size=32, sample_interval=500)
|
gpl-2.0
|
cwhanse/pvlib-python
|
pvlib/tests/test_tracking.py
|
1
|
22569
|
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from numpy.testing import assert_allclose
import pvlib
from pvlib import tracking, pvsystem
from .conftest import DATA_DIR, assert_frame_equal
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
index = pd.date_range(start='20180701T1200', freq='1s', periods=1)
apparent_zenith = pd.Series([10], index=index)
apparent_azimuth = pd.Series([180], index=index)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=index, dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_scalars():
apparent_zenith = 10
apparent_azimuth = 180
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert np.isclose(tracker_data[k], v)
def test_arrays():
apparent_zenith = np.array([10])
apparent_azimuth = np.array([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v, atol=1e-7)
def test_nans():
apparent_zenith = np.array([10, np.nan, 10])
apparent_azimuth = np.array([180, 180, np.nan])
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = {'tracker_theta': np.array([0, nan, nan]),
'aoi': np.array([10, nan, nan]),
'surface_azimuth': np.array([90, nan, nan]),
'surface_tilt': np.array([0, nan, nan])}
for k, v in expect.items():
assert_allclose(tracker_data[k], v, atol=1e-7)
# repeat with Series because nans can differ
apparent_zenith = pd.Series(apparent_zenith)
apparent_azimuth = pd.Series(apparent_azimuth)
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame(np.array(
[[ 0., 10., 90., 0.],
[nan, nan, nan, nan],
[nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(tracker_data, expect)
def test_arrays_multi():
apparent_zenith = np.array([[10, 10], [10, 10]])
apparent_azimuth = np.array([[180, 180], [180, 180]])
# singleaxis should fail for num dim > 1
with pytest.raises(ValueError):
tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
# uncomment if we ever get singleaxis to support num dim > 1 arrays
# assert isinstance(tracker_data, dict)
# expect = {'tracker_theta': np.full_like(apparent_zenith, 0),
# 'aoi': np.full_like(apparent_zenith, 10),
# 'surface_azimuth': np.full_like(apparent_zenith, 90),
# 'surface_tilt': np.full_like(apparent_zenith, 0)}
# for k, v in expect.items():
# assert_allclose(tracker_data[k], v)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_horizon_flat():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.array([100, 45, 100])
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
# depending on platform and numpy versions this will generate
# RuntimeWarning: invalid value encountered in > < >=
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=0,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[ nan, nan, nan, nan],
[ 0., 45., 270., 0.],
[ nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_horizon_tilted():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.full_like(solar_azimuth, 45)
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=90,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[-180., 45., 0., 90.],
[ 0., 45., 180., 90.],
[ 179., 45., 359., 90.]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_low_sun_angles():
# GH 656, 824
result = tracking.singleaxis(
apparent_zenith=80, apparent_azimuth=338, axis_tilt=30,
axis_azimuth=180, max_angle=60, backtrack=True, gcr=0.35)
expected = {
'tracker_theta': np.array([60.0]),
'aoi': np.array([80.420987]),
'surface_azimuth': np.array([253.897886]),
'surface_tilt': np.array([64.341094])}
for k, v in result.items():
assert_allclose(expected[k], v)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_one_array_only():
system = tracking.SingleAxisTracker(
arrays=[pvsystem.Array(
module='foo',
surface_tilt=None,
surface_azimuth=None
)]
)
assert system.module == 'foo'
with pytest.raises(ValueError,
match="SingleAxisTracker does not support "
r"multiple arrays\."):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(module='foo'),
pvsystem.Array(module='bar')]
)
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(arrays=[pvsystem.Array(module='foo')])
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(surface_azimuth=None)])
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(surface_tilt=None)])
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
# results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(
max_angle=60., axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth, backtrack=False)
# the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852, 'surface_azimuth': 180-18.432,
'surface_tilt': 24.92122,
'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
# see test_irradiance for more thorough testing
def test_get_aoi():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
surface_tilt = np.array([30, 0])
surface_azimuth = np.array([90, 270])
solar_zenith = np.array([70, 10])
solar_azimuth = np.array([100, 180])
out = system.get_aoi(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
expected = np.array([40.632115, 10.])
assert_allclose(out, expected, atol=0.000001)
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
# latitude=32, longitude=-111
solar_position = pd.DataFrame(np.array(
[[55.36421554, 55.38851771, 34.63578446, 34.61148229,
172.32003763, -3.44516534],
[96.50000401, 96.50000401, -6.50000401, -6.50000401,
246.91581654, -3.56292888]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
# invalid warnings already generated in horizon test above,
# no need to clutter test output here
with np.errstate(invalid='ignore'):
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
# some invalid values in irradiance.py. not our problem here
with np.errstate(invalid='ignore'):
irradiance = system.get_irradiance(tracker_data['surface_tilt'],
tracker_data['surface_azimuth'],
solar_zenith,
solar_azimuth,
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
expected = pd.DataFrame(data=np.array(
[[961.80070, 815.94490, 145.85580, 135.32820, 10.52757492],
[nan, nan, nan, nan, nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_SingleAxisTracker___repr__():
system = tracking.SingleAxisTracker(
max_angle=45, gcr=.25, module='blah', inverter='blarg',
temperature_model_parameters={'a': -3.56})
expected = """SingleAxisTracker:
axis_tilt: 0
axis_azimuth: 0
max_angle: 45
backtrack: True
gcr: 0.25
cross_axis_tilt: 0.0
name: None
Array:
name: None
surface_tilt: None
surface_azimuth: None
module: blah
albedo: 0.25
racking_model: None
module_type: None
temperature_model_parameters: {'a': -3.56}
strings: 1
modules_per_string: 1
inverter: blarg"""
assert system.__repr__() == expected
def test_calc_axis_tilt():
# expected values
expected_axis_tilt = 2.239 # [degrees]
expected_side_slope = 9.86649274360294 # [degrees]
expected = DATA_DIR / 'singleaxis_tracker_wslope.csv'
expected = pd.read_csv(expected, index_col='timestamp', parse_dates=True)
# solar positions
starttime = '2017-01-01T00:30:00-0300'
stoptime = '2017-12-31T23:59:59-0300'
lat, lon = -27.597300, -48.549610
times = pd.DatetimeIndex(pd.date_range(starttime, stoptime, freq='H'))
solpos = pvlib.solarposition.get_solarposition(times, lat, lon)
# singleaxis tracker w/slope data
slope_azimuth, slope_tilt = 77.34, 10.1149
axis_azimuth = 0.0
max_angle = 75.0
# Note: GCR is relative to horizontal distance between rows
gcr = 0.33292759 # GCR = length / horizontal_pitch = 1.64 / 5 / cos(9.86)
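# Rough check of the arithmetic above: 1.64 / 5 = 0.328 and
# cos(radians(9.86)) ~= 0.9852, so 0.328 / 0.9852 ~= 0.3329, consistent with
# the hard-coded gcr value.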
# calculate tracker axis zenith
axis_tilt = tracking.calc_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth=axis_azimuth)
assert np.isclose(axis_tilt, expected_axis_tilt)
# calculate cross-axis tilt and relative rotation
cross_axis_tilt = tracking.calc_cross_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth, axis_tilt)
assert np.isclose(cross_axis_tilt, expected_side_slope)
sat = tracking.singleaxis(
solpos.apparent_zenith, solpos.azimuth, axis_tilt, axis_azimuth,
max_angle, backtrack=True, gcr=gcr, cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
sat['tracker_theta'], expected['tracker_theta'], atol=1e-7)
np.testing.assert_allclose(sat['aoi'], expected['aoi'], atol=1e-7)
np.testing.assert_allclose(
sat['surface_azimuth'], expected['surface_azimuth'], atol=1e-7)
np.testing.assert_allclose(
sat['surface_tilt'], expected['surface_tilt'], atol=1e-7)
def test_slope_aware_backtracking():
"""
Test validation data set from https://www.nrel.gov/docs/fy20osti/76626.pdf
"""
expected_data = np.array(
[('2019-01-01T08:00-0500', 2.404287, 122.79177, -84.440, -10.899),
('2019-01-01T09:00-0500', 11.263058, 133.288729, -72.604, -25.747),
('2019-01-01T10:00-0500', 18.733558, 145.285552, -59.861, -59.861),
('2019-01-01T11:00-0500', 24.109076, 158.939435, -45.578, -45.578),
('2019-01-01T12:00-0500', 26.810735, 173.931802, -28.764, -28.764),
('2019-01-01T13:00-0500', 26.482495, 189.371536, -8.475, -8.475),
('2019-01-01T14:00-0500', 23.170447, 204.13681, 15.120, 15.120),
('2019-01-01T15:00-0500', 17.296785, 217.446538, 39.562, 39.562),
('2019-01-01T16:00-0500', 9.461862, 229.102218, 61.587, 32.339),
('2019-01-01T17:00-0500', 0.524817, 239.330401, 79.530, 5.490)],
dtype=[
('Time', '<M8[h]'), ('ApparentElevation', '<f8'),
('SolarAzimuth', '<f8'), ('TrueTracking', '<f8'),
('Backtracking', '<f8')])
expected_axis_tilt = 9.666
expected_slope_angle = -2.576
slope_azimuth, slope_tilt = 180.0, 10.0
axis_azimuth = 195.0
axis_tilt = tracking.calc_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth)
assert np.isclose(axis_tilt, expected_axis_tilt, rtol=1e-3, atol=1e-3)
cross_axis_tilt = tracking.calc_cross_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth, axis_tilt)
assert np.isclose(
cross_axis_tilt, expected_slope_angle, rtol=1e-3, atol=1e-3)
sat = tracking.singleaxis(
90.0-expected_data['ApparentElevation'], expected_data['SolarAzimuth'],
axis_tilt, axis_azimuth, max_angle=90.0, backtrack=True, gcr=0.5,
cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
sat['tracker_theta'], expected_data['Backtracking'],
rtol=1e-3, atol=1e-3)
truetracking = tracking.singleaxis(
90.0-expected_data['ApparentElevation'], expected_data['SolarAzimuth'],
axis_tilt, axis_azimuth, max_angle=90.0, backtrack=False, gcr=0.5,
cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
truetracking['tracker_theta'], expected_data['TrueTracking'],
rtol=1e-3, atol=1e-3)
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/frame.py
|
7
|
214523
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.types.cast import (_maybe_upcast,
_infer_dtype_from_scalar,
_possibly_cast_to_datetime,
_possibly_infer_to_datetimelike,
_possibly_convert_platform,
_possibly_downcast_to_dtype,
_invalidate_string_dtypes,
_coerce_to_dtypes,
_maybe_upcast_putmask,
_find_common_type)
from pandas.types.common import (is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.types.missing import isnull, notnull
from pandas.core.common import (PandasError, _try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.computation.expressions as expressions
import pandas.core.algorithms as algos
from pandas.computation.eval import eval as _eval
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util.decorators import deprecate_kwarg, Appender, Substitution
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
import pandas.core.base as base
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.formats.format as fmt
from pandas.formats.printing import pprint_thing
import pandas.tools.plotting as gfx
import pandas.lib as lib
import pandas.algos as _algos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""")
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None):
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise PandasError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = _possibly_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In the case of a non-interactive session,
no boundaries apply.
ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = fmt.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular Dataframe.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if get_option('display.latex.repr'):
return self.to_latex()
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.formats.style.Styler
"""
from pandas.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
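# Illustrative usage (not part of pandas itself): with the default
# orient='columns' the dict keys become column labels, while orient='index'
# turns them into row labels, e.g.
#   DataFrame.from_dict({'a': [1, 2], 'b': [3, 4]})            # columns a, b
#   DataFrame.from_dict({'r1': {'a': 1}, 'r2': {'a': 2}},
#                       orient='index')                        # rows r1, r2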
def to_dict(self, orient='dict'):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
Returns
-------
result : dict like {column -> {index -> value}}
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if orient.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
'data': lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
return dict((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [dict((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
THIS IS AN EXPERIMENTAL LIBRARY
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
.. versionadded:: 0.17.0
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = lmap(str, index_names) + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])
return np.rec.fromarrays(arrays, dtype=dtype, names=names)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or in the new, expanded format (if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sortlevel(0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
            then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
            write multi_index columns as a list of tuples (if True)
            or in the new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
.. versionadded:: 0.16.0
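        Examples
        --------
        A minimal sketch; ``'out.csv'`` is an illustrative path:
        >>> df.to_csv('out.csv', index=False)
        >>> csv_text = df.to_csv()  # path_or_buf=None returns a string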
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
"""
        Write DataFrame to an Excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
        startrow : int, default 0
            Upper left cell row to dump data frame
        startcol : int, default 0
            Upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
from pandas.io.excel import ExcelWriter
need_save = False
if encoding is None:
encoding = 'ascii'
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
if need_save:
excel_writer.save()
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
        Write the DataFrame to a Stata binary dta file
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
            internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
        data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
            * Columns listed in convert_dates are neither datetime64[ns]
              nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
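        Calling the method directly is equivalent; ``data`` above is an
        illustrative DataFrame:
        >>> data.to_stata('./data_file.dta')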
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
            Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
        # TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
@Appender(fmt.common_docstring + fmt.return_docstring, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.'):
"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
`to_latex`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
            <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
default: False
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
default: True
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
"""
# Get defaults from the pandas config
if longtable is None:
longtable = get_option("display.latex.longtable")
if escape is None:
escape = get_option("display.latex.escape")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
            to True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
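        Examples
        --------
        A minimal sketch; the frame is illustrative and the output depends on
        display options:
        >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', None]})
        >>> df.info(memory_usage='deep')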
"""
from pandas.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
        elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if 'object' in counts or is_object_dtype(self.index):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include memory usage of DataFrame's
            index in returned Series. If ``index=True`` (the default), the
            first entry of the returned Series is the memory usage of the
            index, labelled ``'Index'``.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
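        Examples
        --------
        A minimal sketch; exact byte counts are platform dependent:
        >>> df = pd.DataFrame({'a': range(3), 'b': list('xyz')})
        >>> df.memory_usage(index=True, deep=True)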
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
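        Examples
        --------
        A minimal sketch with illustrative labels:
        >>> df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
        >>> df.get_value('y', 'a')
        2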
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
return engine.get_value(series.get_values(), index)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
            If the label pair is contained, a reference to the calling
            DataFrame is returned; otherwise a new object is created
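        Examples
        --------
        A minimal sketch; the labels are illustrative:
        >>> df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
        >>> df = df.set_value('x', 'a', 10)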
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def irow(self, i, copy=False):
"""
DEPRECATED. Use ``.iloc[i]`` instead
"""
warnings.warn("irow(i) is deprecated. Please use .iloc[i]",
FutureWarning, stacklevel=2)
return self._ixs(i, axis=0)
def icol(self, i):
"""
DEPRECATED. Use ``.iloc[:, i]`` instead
"""
warnings.warn("icol(i) is deprecated. Please use .iloc[:,i]",
FutureWarning, stacklevel=2)
return self._ixs(i, axis=1)
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.ix[:, lab_slice]
else:
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def iget_value(self, i, j):
"""
DEPRECATED. Use ``.iat[i, j]`` instead
"""
warnings.warn("iget_value(i, j) is deprecated. Please use .iat[i, j]",
FutureWarning, stacklevel=2)
return self.iat[i, j]
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=None, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool
If the expression contains an assignment, whether to return a new
DataFrame or mutate the existing.
            WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : list-like
A list of dtypes or strings to be included/excluded. You must pass
in a non-empty sequence for at least one of these.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
TypeError
* If either of ``include`` or ``exclude`` is not a sequence
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select Pandas categorical dtypes, use 'category'
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
include, exclude = include or (), exclude or ()
if not (is_list_like(include) and is_list_like(exclude)):
raise TypeError('include and exclude must both be non-string'
' sequences')
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
_invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.ix._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.ix._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.ix._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrames index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrames index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
If `allow_duplicates` is False, raises Exception if column
is already contained in the DataFrame.
Parameters
----------
loc : int
Must have 0 <= loc <= len(columns)
column : object
value : scalar, Series, or array-like
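        Examples
        --------
        A minimal sketch; inserts ``'b'`` as the first column:
        >>> df = pd.DataFrame({'a': [1, 2]})
        >>> df.insert(0, 'b', [3, 4])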
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
            change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
        arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = _possibly_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = _possibly_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = _infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = _possibly_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
        Returns
        -------
        values : ndarray
            The found values
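        Examples
        --------
        A minimal sketch with illustrative labels:
        >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
        >>> df.lookup(['x', 'y'], ['a', 'b'])
        array([1, 4])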
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, copy, level, fill_value,
limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method, level,
limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, level=level,
limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algos.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> indexed_df = df.set_index(['A', 'B'])
>>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
>>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
Returns
-------
dataframe : DataFrame
"""
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index.get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col.get_level_values(n))
level = col.get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
        reset : DataFrame
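        Examples
        --------
        A minimal sketch; the named index becomes an ordinary column:
        >>> df = pd.DataFrame({'a': [1, 2]},
        ...                   index=pd.Index(['x', 'y'], name='k'))
        >>> df.reset_index()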
"""
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
values = values.take(labels)
if mask.any():
values, changed = _maybe_upcast_putmask(values, mask,
np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
names = self.index.names
zipped = lzip(self.index.levels, self.index.labels)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(zipped))):
col_name = names[i]
if col_name is None:
col_name = 'level_%d' % i
if multi_col:
if col_fill is None:
col_name = tuple([col_name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = col_name
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
if name is None or name == 'index':
name = 'index' if 'index' not in self else 'level_0'
if isinstance(self.columns, MultiIndex):
if col_fill is None:
name = tuple([name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = name
name = tuple(name_lst)
values = _maybe_casted_values(self.index)
new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
        Return object with labels on given axis omitted where any or all of
        the data are missing, depending on ``how``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
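        Examples
        --------
        A minimal sketch; the values are illustrative:
        >>> df = pd.DataFrame({'a': [1.0, np.nan], 'b': [2.0, 3.0]})
        >>> df.dropna()                     # drop rows with any NaN
        >>> df.dropna(axis=1, how='any')    # drop columns with any NaN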
"""
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
take_last : deprecated
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
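        Examples
        --------
        A minimal sketch; the values are illustrative:
        >>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
        >>> df.drop_duplicates()
        >>> df.drop_duplicates(subset=['a'], keep='last')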
"""
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
take_last : deprecated
Returns
-------
duplicated : Series
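        Examples
        --------
        A minimal sketch; with ``keep='first'`` only the repeat is flagged:
        >>> df = pd.DataFrame({'a': [1, 1, 2]})
        >>> df.duplicated()
        0    False
        1     True
        2    False
        dtype: bool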
"""
from pandas.core.groupby import get_group_index
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algos.factorize(vals,
size_hint=min(len(self),
_SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.groupby import _lexsort_indexer
def trans(v):
if needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(trans(k))
indexer = _lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.groupby import _nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
                                     'multi-index, you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = _nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sort(self, columns=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last', **kwargs):
"""
DEPRECATED: use :meth:`DataFrame.sort_values`
Sort DataFrame either by labels (along either axis) or by the values in
column(s)
Parameters
----------
columns : object
Column name(s) in frame. Accepts a column name or a list
for a nested sort. A tuple will be interpreted as the
levels of a multi-index.
ascending : boolean or list, default True
Sort ascending vs. descending. Specify list for multiple sort
orders
axis : {0 or 'index', 1 or 'columns'}, default 0
Sort index/rows versus columns
inplace : boolean, default False
Sort the DataFrame without creating a new instance
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
This option is only applied when sorting on a single column or
label.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Examples
--------
>>> result = df.sort(['A', 'B'], ascending=[1, 0])
Returns
-------
sorted : DataFrame
"""
nv.validate_sort(tuple(), kwargs)
if columns is None:
warnings.warn("sort(....) is deprecated, use sort_index(.....)",
FutureWarning, stacklevel=2)
return self.sort_index(axis=axis, ascending=ascending,
inplace=inplace)
warnings.warn("sort(columns=....) is deprecated, use "
"sort_values(by=.....)", FutureWarning, stacklevel=2)
return self.sort_values(by=columns, axis=axis, ascending=ascending,
inplace=inplace, kind=kind,
na_position=na_position)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, pls use "
".sort_values(by=...)", FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# sort by the index
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.groupby import _lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
if not labels.is_lexsorted():
labels = MultiIndex.from_tuples(labels.values)
indexer = _lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
else:
from pandas.core.groupby import _nargsort
# GH11080 - Check monotonic-ness before sort an index
# if monotonic (already sorted), return None or copy() according
# to 'inplace'
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
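    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): sorting by the row labels with DataFrame.sort_index.
    #
    #   >>> df = pd.DataFrame({'A': [1, 2, 3]}, index=['b', 'c', 'a'])
    #   >>> df.sort_index()                 # rows reordered to 'a', 'b', 'c'
    #   >>> df.sort_index(ascending=False)  # rows reordered to 'c', 'b', 'a'
    #   >>> # with a MultiIndex, a single level can be targeted:
    #   >>> # df.sort_index(level=0, sort_remaining=True)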
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
a b c
3 11 c 3
1 10 b 2
2 8 d NaN
"""
return algos.select_n_frame(self, columns, n, 'nlargest', keep)
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algos.select_n_frame(self, columns, n, 'nsmallest', keep)
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
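    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): swapping two MultiIndex levels by name with DataFrame.swaplevel.
    #
    #   >>> idx = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b')],
    #   ...                                 names=['first', 'second'])
    #   >>> df = pd.DataFrame({'A': [1, 2]}, index=idx)
    #   >>> df.swaplevel('first', 'second').index.names
    #   FrozenList(['second', 'first'])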
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
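    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): reordering MultiIndex levels by name with
    # DataFrame.reorder_levels.
    #
    #   >>> idx = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b')],
    #   ...                                 names=['outer', 'inner'])
    #   >>> df = pd.DataFrame({'A': [1, 2]}, index=idx)
    #   >>> df.reorder_levels(['inner', 'outer']).index.names
    #   FrozenList(['inner', 'outer'])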
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
            # Ambiguous case: use _series so this works with a DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
if self.empty:
return self
new_data = self._data.eval(func=func, other=other,
raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Add two DataFrame objects and do not propagate NaN values, so if for a
(column, time) one frame is missing a value, it will default to the
other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = _find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
# _possibly_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = _possibly_cast_to_datetime(arr, new_dtype)
else:
arr = _possibly_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
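    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): DataFrame.combine applies ``func`` column by column to the two
    # aligned frames.
    #
    #   >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
    #   >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    #   >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
    #   >>> df1.combine(df2, take_smaller)
    #      A  B
    #   0  0  3
    #   1  0  3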
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Examples
--------
a's values prioritized, use values from b to fill holes:
>>> a.combine_first(b)
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
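    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): DataFrame.combine_first fills the caller's holes from ``other``.
    #
    #   >>> a = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
    #   >>> b = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
    #   >>> a.combine_first(b)
    #        A    B
    #   0  1.0  3.0
    #   1  0.0  4.0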
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
                    # don't overwrite columns unnecessarily
if mask.all():
continue
else:
mask = notnull(this)
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
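    # Usage sketch (illustrative comment only; assumes ``import pandas as pd``
    # and ``import numpy as np``): DataFrame.update modifies the caller in
    # place and only uses non-NA values from ``other``.
    #
    #   >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
    #   >>> other = pd.DataFrame({'B': [4, np.nan, 6]})
    #   >>> df.update(other)              # in place, returns None
    #   >>> df['B'].tolist()              # NaN in other left 500 untouched
    #   [4.0, 500.0, 6.0]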
# ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
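    # Usage sketch (illustrative comment only; assumes ``import pandas as pd``
    # and ``import numpy as np``): locating the first/last row labels that
    # hold any non-null value.
    #
    #   >>> df = pd.DataFrame({'A': [np.nan, 2, np.nan], 'B': [np.nan, 5, 6]})
    #   >>> df.first_valid_index()
    #   1
    #   >>> df.last_valid_index()
    #   2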
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
        --------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
            .. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
            .. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
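    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): first discrete difference down the rows and across the columns.
    #
    #   >>> df = pd.DataFrame({'A': [1, 3, 6], 'B': [1, 4, 9]})
    #   >>> df.diff()            # row-wise: first row becomes NaN
    #        A    B
    #   0  NaN  NaN
    #   1  2.0  3.0
    #   2  3.0  5.0
    #   >>> df.diff(axis=1)      # column-wise difference, B minus A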
# ----------------------------------------------------------------------
# Function application
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(f, axis, reduce=reduce)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.ix[:, self.columns]
from pandas.tools.merge import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
            otherwise joins index-on-index. If multiple
columns given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
preserves the index order of the calling (left) DataFrame
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
                A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
                 A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
                 A key     B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.tools.merge import merge, concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
from pandas.tools.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
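    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): a column-on-column merge between two small hypothetical frames.
    #
    #   >>> left = pd.DataFrame({'key': ['K0', 'K1'], 'A': ['A0', 'A1']})
    #   >>> right = pd.DataFrame({'key': ['K0', 'K2'], 'B': ['B0', 'B2']})
    #   >>> left.merge(right, on='key', how='inner')   # only 'K0' survives
    #   >>> left.merge(right, on='key', how='outer')   # 'K0', 'K1' and 'K2'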
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.tools.merge import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if method == 'pearson':
correl = _algos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = _algos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=cols, columns=cols)
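    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): pairwise column correlations with the default Pearson method and
    # with Spearman ranks.
    #
    #   >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [10, 20, 30, 40]})
    #   >>> df.corr()              # perfectly linear, so 1.0 off-diagonal
    #        A    B
    #   A  1.0  1.0
    #   B  1.0  1.0
    #   >>> df.corr(method='spearman', min_periods=3)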
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = _algos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=cols, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
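    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): column-wise correlation between two frames sharing column labels.
    #
    #   >>> df1 = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 0, 1]})
    #   >>> df2 = pd.DataFrame({'A': [2, 4, 6], 'B': [1, 1, 0]})
    #   >>> df1.corrwith(df2)       # Series indexed by 'A' and 'B'
    #   >>> df1.corrwith(df2['A'])  # against a single Series, per column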
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
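    # Usage sketch (illustrative comment only; assumes ``import pandas as pd``
    # and ``import numpy as np``): counting non-null cells per column and per
    # row.
    #
    #   >>> df = pd.DataFrame({'A': [1, np.nan, 3], 'B': [np.nan, np.nan, 6]})
    #   >>> df.count()            # per column: A -> 2, B -> 1
    #   >>> df.count(axis=1)      # per row:    0 -> 1, 1 -> 0, 2 -> 2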
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
result = self.apply(f, reduce=False)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = _coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be first index.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
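    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): index labels of the column-wise minima and row-wise maxima.
    #
    #   >>> df = pd.DataFrame({'A': [3, 1, 2], 'B': [9, 7, 8]},
    #   ...                   index=['x', 'y', 'z'])
    #   >>> df.idxmin()           # A -> 'y', B -> 'y'
    #   >>> df.idxmax(axis=1)     # every row's maximum sits in column 'B'
    #   x    B
    #   y    B
    #   z    B
    #   dtype: object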
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Empty if
nothing has 2+ occurrences. Adds a row for each mode per label, fills
in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
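    # Usage sketch (illustrative comment only; assumes ``import pandas as
    # pd``): round-tripping the row labels between a DatetimeIndex and a
    # PeriodIndex with to_period/to_timestamp.
    #
    #   >>> idx = pd.date_range('2000-01-01', periods=3, freq='M')
    #   >>> df = pd.DataFrame({'A': [1, 2, 3]}, index=idx)
    #   >>> df.to_period('M').index                            # PeriodIndex
    #   >>> df.to_period('M').to_timestamp(how='start').index  # Timestamps again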
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.tools.merge import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(lib.ismember(self.values.ravel(),
set(values)).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Deprecated stuff
def combineAdd(self, other):
"""
DEPRECATED. Use ``DataFrame.add(other, fill_value=0.)`` instead.
Add two DataFrame objects and do not propagate
NaN values, so if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might
be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
See also
--------
DataFrame.add
"""
warnings.warn("'combineAdd' is deprecated. Use "
"'DataFrame.add(other, fill_value=0.)' instead",
FutureWarning, stacklevel=2)
return self.add(other, fill_value=0.)
def combineMult(self, other):
"""
DEPRECATED. Use ``DataFrame.mul(other, fill_value=1.)`` instead.
Multiply two DataFrame objects and do not propagate NaN values, so if
for a (column, time) one frame is missing a value, it will default to
the other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
See also
--------
DataFrame.mul
"""
warnings.warn("'combineMult' is deprecated. Use "
"'DataFrame.mul(other, fill_value=1.)' instead",
FutureWarning, stacklevel=2)
return self.mul(other, fill_value=1.)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return _possibly_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algos.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = _possibly_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=NA)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None, **kwds):
import pandas.tools.plotting as plots
import matplotlib.pyplot as plt
ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
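# ---------------------------------------------------------------------
# Hedged usage sketch (not part of pandas): a minimal illustration of the
# constructor helpers above, exercised through the public DataFrame API.
# The function name is ours; only pd.DataFrame and pd.Series are assumed.
def _example_constructor_paths():
    import pandas as pd
    # list of dicts -> _list_of_dict_to_arrays: columns are the union of keys
    frame_from_dicts = pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}])
    # dict of Series -> extract_index/_homogenize: index is the union of indexes
    frame_from_series = pd.DataFrame({'x': pd.Series([1., 2.], index=['i', 'j']),
                                      'y': pd.Series([3.], index=['j'])})
    return frame_from_dicts, frame_from_series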
|
gpl-3.0
|
wzbozon/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components via set_params raises on partial_fit.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
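# Hedged usage sketch (not one of the tests above): the incremental,
# batch-wise fitting pattern exercised by these tests. Only the public
# IncrementalPCA.partial_fit/transform API is assumed; the name is ours.
def _example_incremental_pca_batches():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    ipca = IncrementalPCA(n_components=3)
    for start in range(0, X.shape[0], 50):
        ipca.partial_fit(X[start:start + 50])  # feed the data in chunks
    return ipca.transform(X)  # project using the final incremental fit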
|
bsd-3-clause
|
djgagne/scikit-learn
|
examples/model_selection/plot_train_error_vs_test_error.py
|
349
|
2577
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the top 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
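###############################################################################
# Hedged alternative (not part of the original example): the same alpha
# selection can be done by cross-validation on the training set with
# ElasticNetCV; only this public estimator is assumed.
enet_cv = linear_model.ElasticNetCV(l1_ratio=0.7, alphas=alphas, cv=5)
enet_cv.fit(X_train, y_train)
print("Alpha chosen by cross-validation : %s" % enet_cv.alpha_)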
|
bsd-3-clause
|
bhargav/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
7
|
81060
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``,
        the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
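# Hedged sketch (not part of this module's public API): for dense,
# already-centered data with ``normalize=False`` the grid above reduces to
# the closed form alpha_max = max|X^T y| / (n_samples * l1_ratio) followed by
# n_alphas log-spaced values down to eps * alpha_max. The degenerate
# alpha_max ~ 0 case handled above is ignored here; the name is ours.
def _example_alpha_grid_dense(X, y, l1_ratio=1.0, eps=1e-3, n_alphas=100):
    X = np.asarray(X, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    alpha_max = np.abs(np.dot(X.T, y)).max() / (X.shape[0] * l1_ratio)
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]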
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing the same path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
    check_input : bool, default True
        If set to ``False``, the input validation checks (including the Gram
        matrix when provided) are skipped; it is assumed that they are
        handled by the caller.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=np.float64,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
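# Hedged usage sketch (not part of this module): enet_path on a tiny dense
# problem, showing the return values documented above. Names are ours.
def _example_enet_path():
    rng = np.random.RandomState(0)
    X = np.asfortranarray(rng.randn(30, 4))
    y = X[:, 0] + 0.1 * rng.randn(30)
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    # coefs has shape (n_features, n_alphas) for this mono-output problem
    return alphas, coefs, dual_gaps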
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if (isinstance(self.precompute, six.string_types) and
self.precompute == 'auto'):
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
y = np.asarray(y, dtype=np.float64)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, dtype=np.float64, order='F', copy=False,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T,
dense_output=True) +
self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
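# Hedged usage sketch (not part of this module): the ElasticNet docstring
# notes that penalising a * L1 + b * L2 corresponds to alpha = a + b and
# l1_ratio = a / (a + b). The helper below is ours and only illustrates that
# conversion before fitting.
def _example_elastic_net_from_l1_l2(X, y, a=0.7, b=0.3):
    alpha = a + b
    l1_ratio = a / (a + b)
    model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio).fit(X, y)
    return model.coef_, model.intercept_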
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
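# Hedged sketch (not part of this module): for a single-output dense problem
# the per-alpha MSE computed above reduces to the lines below. Names are
# ours; only numpy is assumed.
def _example_mse_per_alpha(X_test, y_test, coefs, intercepts):
    # coefs: (n_features, n_alphas), intercepts: (n_alphas,)
    predictions = np.dot(X_test, coefs) + intercepts  # (n_samples, n_alphas)
    residues = predictions - y_test[:, np.newaxis]
    return (residues ** 2).mean(axis=0)  # one MSE per alpha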
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
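A quick sketch of this conversion (the values of ``a`` and ``b`` below are
arbitrary):
>>> a, b = 1.0, 1.0  # desired L1 and L2 strengths
>>> alpha = a + b
>>> l1_ratio = a / (a + b)
>>> alpha, l1_ratio
(2.0, 0.5)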
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
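As a minimal NumPy sketch of this mixed norm (the matrix below is arbitrary):
>>> import numpy as np
>>> W = np.array([[3.0, 4.0], [0.0, 0.0]])
>>> float(np.sqrt((W ** 2).sum(axis=1)).sum())  # row norms are 5.0 and 0.0
5.0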
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of the
data at a time, hence it will automatically convert the X input to
a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=np.float64, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
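# The Cython solver minimizes 0.5 * ||Y - XW||_Fro^2 + l1_reg * ||W||_21
# + 0.5 * l2_reg * ||W||_Fro^2, whereas the user-facing objective averages
# the loss over n_samples, hence alpha is rescaled by n_samples below.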
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
jm-begon/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
258
|
2861
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
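# with non_negative=True every token contributes an absolute weight of 1, so the
# duplicated "foo" gives row 0 a total weight of 4 spread over 3 distinct hashed
# features (the assertions below also rely on these tokens not colliding at any
# of the tested n_features sizes)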
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
madjelan/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
|
294
|
3417
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points: remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
|
bsd-3-clause
|
masfaraud/volmdlr
|
scripts/distance/tore_cyl.py
|
1
|
4434
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 14:56:13 2020
@author: Mack Pro
"""
import numpy as npy
import volmdlr as volmdlr
import volmdlr.primitives3D as primitives3D
import volmdlr.primitives2D as primitives2D
import matplotlib.pyplot as plt
import random
import math
rmin, rmax = 100, 1000
posmin, posmax = -100, 100
x1, y1, z1 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
x2, y2, z2 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
R1 = random.randrange(rmin, rmax, 1)/1000 #Radius of the generative arc3D
r1, r2 = random.randrange(rmin/10, rmax/10, 1)/1000, random.randrange(rmin/2, rmax/2, 1)/1000 #Radius of the arc3d generated
c1, c2 = volmdlr.Point3D([x1,y1,z1]), volmdlr.Point3D([x2,y2,z2]) #Choose the coordinate of the center
x3, y3, z3 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
x4, y4, z4 = random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100, random.randrange(posmin, posmax, 1)/100
n1, n2 = volmdlr.Vector3D([x3,y3,z3]), volmdlr.Vector3D([x4,y4,z4]) #Choose the normals
n1.Normalize() #Normalize the normals in case they are not already unit vectors
n2.Normalize()
plane1, plane2 = volmdlr.Plane3D.from_normal(c1, n1), volmdlr.Plane3D.from_normal(c2, n2) #Create a plane to obtain two other basis vectors
frame1 = volmdlr.Frame3D(c1, plane1.vectors[0], plane1.vectors[1], n1) #Frame in the center of the Tore
frame2 = volmdlr.Frame3D(c2, plane2.vectors[0], plane2.vectors[1], n2)
toresurface1 = volmdlr.ToroidalSurface3D(frame1, R1, r1)
cylsurface2 = volmdlr.CylindricalSurface3D(frame2, r2)
angle_min, angle_max = 0, 2*3.14*100
theta1 = random.randrange(angle_min, angle_max, 20)/100 #Angular extent of the torus
phi1 = 2*math.pi #Angle of the circle (a full revolution here)
offset_theta1 = random.randrange(angle_min, angle_max, 20)/100 #Offset of theta, to rotate the patch relative to the normal's reference
offset_phi1 = random.randrange(angle_min, angle_max, 20)/100 #Same, but relative to the circle's normal
print('param1', phi1, theta1, offset_phi1, offset_theta1)
#You have to create a cutting pattern in 2D
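# The rectangle below, defined in the torus's (theta, phi) parameter space,
# bounds the portion of the surface used to build the toroidal face.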
pt1, pt2, pt3, pt4 = volmdlr.Point2D((offset_theta1, offset_phi1)), volmdlr.Point2D((offset_theta1, offset_phi1+phi1)), volmdlr.Point2D((offset_theta1+theta1, offset_phi1+phi1)), volmdlr.Point2D((offset_theta1+theta1, offset_phi1))
seg1, seg2, seg3, seg4 = volmdlr.LineSegment2D(pt1, pt2), volmdlr.LineSegment2D(pt2, pt3), volmdlr.LineSegment2D(pt3, pt4), volmdlr.LineSegment2D(pt4, pt1)
edges = [seg1, seg2, seg3, seg4]
contours2d = [volmdlr.Contour2D(edges)]
points = [theta1, phi1]
#Cylinder
hmin, hmax = -50, 50
h2 = random.randrange(hmin, hmax, 5)/100 #Height of cylinder
angle_cyl = random.randrange(angle_min, angle_max, 20)/100
center2d2 = c2.To2D(c2, plane2.vectors[0], plane2.vectors[1])
segbh2 = volmdlr.LineSegment2D(center2d2, center2d2 + volmdlr.Point2D((0,h2)) + volmdlr.Point2D((angle_cyl/3,0))) #### Minus Pt2D because of Step adaptation
circlestart2 = volmdlr.LineSegment2D(segbh2.points[1], segbh2.points[1]+volmdlr.Point2D((angle_cyl,0)) - volmdlr.Point2D((0,h2/10))) #You can change 2*pi by an other angle
seghb2 = volmdlr.LineSegment2D(circlestart2.points[1],circlestart2.points[1]-segbh2.points[1] + volmdlr.Point2D((angle_cyl/3,0)))
circlend2 = volmdlr.LineSegment2D(seghb2.points[1],segbh2.points[0])
edges2 = [segbh2, circlestart2, seghb2, circlend2]
points2 = edges2[0].points
contours2 = [volmdlr.Contour2D(edges2)]
toroidalface1 = volmdlr.ToroidalFace3D(contours2d, toresurface1, points)
cyl2 = volmdlr.CylindricalFace3D(contours2, cylsurface2, points2)
pts1, tangle1 = toroidalface1.triangulation(resolution=10)
pts2, tangle2 = cyl2.triangulation(resolution=12)
distance, p1, p2 = toroidalface1.minimum_distance(cyl2, return_points=True)
print(distance)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# [pt.MPLPlot(ax=ax) for pt in pts1]
# [pt.MPLPlot(ax=ax) for pt in pts2]
# p1.MPLPlot(ax=ax, color='r')
# p2.MPLPlot(ax=ax, color='b')
# toroidalface1.start.MPLPlot(ax=ax, color='m')
# toroidalface2.start.MPLPlot(ax=ax, color='g')
# LS = volmdlr.LineSegment3D(p1, p2)
shell = volmdlr.Shell3D([toroidalface1,cyl2])
vol = volmdlr.VolumeModel([shell, p1, p2])
vol.babylonjs_from_script()
# m = volmdlr.VolumeModel([shell])
# m.babylonjs()
|
gpl-3.0
|
clembou/PCWG
|
pcwg/gui/dataset.py
|
1
|
54713
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 10 14:27:00 2016
@author: Stuart
"""
import Tkinter as tk
import tkFileDialog
import ttk
import tkMessageBox
import os.path
import re
import pandas as pd
import base_dialog
import validation
from grid_box import GridBox
from grid_box import DialogGridBox
from ..configuration.base_configuration import RelationshipFilter
from ..configuration.base_configuration import Filter
from ..configuration.dataset_configuration import Exclusion
from ..configuration.dataset_configuration import CalibrationSector
from ..configuration.dataset_configuration import ShearMeasurement
from ..configuration.dataset_configuration import DatasetConfiguration
from ..configuration.preferences_configuration import Preferences
from ..core.dataset import getSeparatorValue
from ..core.dataset import getDecimalValue
from ..exceptions.handling import ExceptionHandler
from ..core.status import Status
columnSeparator = "|"
def encodeRelationshipFilterValuesAsText(relationshipFilter):
text = ""
for clause in relationshipFilter.clauses:
text += encodeFilterValuesAsText(clause.column,clause.value, clause.filterType, clause.inclusive, "" )
text += " #" + relationshipFilter.conjunction + "# "
return text[:-5]
def encodeFilterValuesAsText(column, value, filterType, inclusive, active):
return "{column}{sep}{value}{sep}{FilterType}{sep}{inclusive}{sep}{active}".format(column = column, sep = columnSeparator,value = value, FilterType = filterType, inclusive =inclusive, active = active)
def extractRelationshipFilterFromText(text):
try:
clauses = []
for i, subFilt in enumerate(text.split(base_dialog.filterSeparator)):
if i%2 == 0:
items = subFilt.split(base_dialog.columnSeparator)
column = items[0].strip()
value = float(items[1].strip())
filterType = items[2].strip()
inclusive = base_dialog.getBoolFromText(items[3].strip())
clauses.append(Filter(True,column,filterType,inclusive,value))
else:
if len(subFilt.strip()) > 1:
conjunction = subFilt.strip()
return RelationshipFilter(True,conjunction,clauses)
except ExceptionHandler.ExceptionType as ex:
ExceptionHandler.add(ex, "Cannot parse values from filter text")
class FilterDialog(base_dialog.BaseDialog):
def __init__(self, master, parent_dialog, item = None):
self.parent_dialog = parent_dialog
self.isNew = (item == None)
if self.isNew:
self.item = Filter()
else:
self.item = item
base_dialog.BaseDialog.__init__(self, master)
def ShowColumnPicker(self, parentDialog, pick, selectedColumn):
return self.parent_dialog.ShowColumnPicker(parentDialog, pick, selectedColumn)
def body(self, master):
self.prepareColumns(master)
self.addTitleRow(master, "Filter Settings:")
self.column = self.addPickerEntry(master, "Column:", validation.ValidateNotBlank(master), self.item.column)
self.value = self.addEntry(master, "Value:", validation.ValidateFloat(master), self.item.value)
self.filterType = self.addOption(master, "Filter Type:", ["Below", "Above", "AboveOrBelow"], self.item.filterType)
if self.item.inclusive:
self.inclusive = self.addCheckBox(master, "Inclusive:", 1)
else:
self.inclusive = self.addCheckBox(master, "Inclusive:", 0)
if self.item.active:
self.active = self.addCheckBox(master, "Active:", 1)
else:
self.active = self.addCheckBox(master, "Active:", 0)
#dummy label to indent controls
tk.Label(master, text=" " * 5).grid(row = (self.row-1), sticky=tk.W, column=self.titleColumn)
def apply(self):
if int(self.active.get()) == 1:
self.item.active = True
else:
self.item.active = False
if int(self.inclusive.get()) == 1:
self.item.inclusive = True
else:
self.item.inclusive = False
self.item.column = self.column.get()
self.item.value = float(self.value.get())
self.item.filterType = self.filterType.get()
if self.isNew:
Status.add("Filter created")
else:
Status.add("Filter updated")
class ExclusionDialog(base_dialog.BaseDialog):
def __init__(self, master, parent_dialog, item = None):
self.isNew = (item == None)
if self.isNew:
self.item = Exclusion()
else:
self.item = item
base_dialog.BaseDialog.__init__(self, master)
def body(self, master):
self.prepareColumns(master)
#dummy label to force width
tk.Label(master, text=" " * 275).grid(row = self.row, sticky=tk.W, column=self.titleColumn, columnspan = 8)
self.row += 1
self.addTitleRow(master, "Exclusion Settings:")
self.startDate = self.addDatePickerEntry(master, "Start Date:", validation.ValidateNotBlank(master), self.item.startDate)
self.endDate = self.addDatePickerEntry(master, "End Date:", validation.ValidateNotBlank(master), self.item.endDate)
if self.item.active:
self.active = self.addCheckBox(master, "Active:", 1)
else:
self.active = self.addCheckBox(master, "Active:", 0)
#dummy label to indent controls
tk.Label(master, text=" " * 5).grid(row = (self.row-1), sticky=tk.W, column=self.titleColumn)
def apply(self):
if int(self.active.get()) == 1:
self.item.active = True
else:
self.item.active = False
self.item.startDate = pd.to_datetime(self.startDate.get().strip(), dayfirst =True)
self.item.endDate = pd.to_datetime(self.endDate.get().strip(), dayfirst =True)
if self.isNew:
Status.add("Exclusion created")
else:
Status.add("Exclusion updated")
class CalibrationDirectionDialog(base_dialog.BaseDialog):
def __init__(self, master, parent_dialog, item):
self.isNew = (item == None)
if self.isNew:
self.item = CalibrationSector()
else:
self.item = item
base_dialog.BaseDialog.__init__(self, master)
def body(self, master):
self.prepareColumns(master)
self.addTitleRow(master, "Calibration Direction Settings:")
self.direction = self.addEntry(master, "Direction:", validation.ValidateFloat(master), self.item.direction)
self.slope = self.addEntry(master, "Slope:", validation.ValidateFloat(master), self.item.slope)
self.offset = self.addEntry(master, "Offset:", validation.ValidateFloat(master), self.item.offset)
if self.item.active:
self.active = self.addCheckBox(master, "Active:", 1)
else:
self.active = self.addCheckBox(master, "Active:", 0)
#dummy label to indent controls
tk.Label(master, text=" " * 5).grid(row = (self.row-1), sticky=tk.W, column=self.titleColumn)
def apply(self):
if int(self.active.get()) == 1:
self.item.active = True
else:
self.item.active = False
self.item.direction = float(self.direction.get())
self.item.slope = float(self.slope.get().strip())
self.item.offset = float(self.offset.get().strip())
if self.isNew:
Status.add("Calibration direction created")
else:
Status.add("Calibration direction updated")
class ShearDialogBase(base_dialog.BaseDialog):
def __init__(self, master, parent_dialog, item):
self.parent_dialog = parent_dialog
self.isNew = (item == None)
if self.isNew:
self.item = ShearMeasurement()
else:
self.item = item
base_dialog.BaseDialog.__init__(self, master)
def ShowColumnPicker(self, parentDialog, pick, selectedColumn):
return self.parent_dialog.ShowColumnPicker(parentDialog, pick, selectedColumn)
def parse_height(self):
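# Infer the measurement height from the selected wind speed column name by
# taking its first numeric token (e.g. a hypothetical column 'WS 80m' yields 80.0).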
wind_speed_text = self.windSpeed.get()
if len(wind_speed_text) > 0:
numbers = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", wind_speed_text)
print numbers
if len(numbers) > 0:
try:
self.height.set("{0}".format(float(numbers[0])))
except:
Status.add("Cannot parse height")
class ShearMeasurementDialog(ShearDialogBase):
def __init__(self, master, parent_dialog, item):
ShearDialogBase.__init__(self, master, parent_dialog, item)
def body(self, master):
self.prepareColumns(master)
self.addTitleRow(master, "Shear measurement:")
self.height = self.addEntry(master, "Height:", validation.ValidatePositiveFloat(master), self.item.height)
self.windSpeed = self.addPickerEntry(master, "Wind Speed:", validation.ValidateNotBlank(master), self.item.wind_speed_column, width = 60)
#dummy label to indent controls
tk.Label(master, text=" " * 5).grid(row = (self.row-1), sticky=tk.W, column=self.titleColumn)
def apply(self):
self.item.height = float(self.height.get())
self.item.wind_speed_column = self.windSpeed.get().strip()
if self.isNew:
Status.add("Shear measurement created")
else:
Status.add("Shear measurement updated")
class REWSProfileLevelDialog(ShearDialogBase):
def __init__(self, master, parent_dialog, item):
ShearDialogBase.__init__(self, master, parent_dialog, item)
def body(self, master):
self.prepareColumns(master)
self.addTitleRow(master, "REWS Level Settings:")
self.height = self.addEntry(master, "Height:", validation.ValidatePositiveFloat(master), self.item.height)
parse_button = tk.Button(master, text="Parse", command = self.parse_height, width=3, height=1)
parse_button.grid(row=(self.row-1), sticky=tk.N, column=self.inputColumn, padx = 160)
self.windSpeed = self.addPickerEntry(master, "Wind Speed:", validation.ValidateNotBlank(master), self.item.wind_speed_column, width = 60)
self.windDirection = self.addPickerEntry(master, "Wind Direction:", None, self.item.wind_direction_column, width = 60)
self.upflow = self.addPickerEntry(master, "Upflow:", None, self.item.upflow_column, width = 60)
#dummy label to indent controls
tk.Label(master, text=" " * 5).grid(row = (self.row-1), sticky=tk.W, column=self.titleColumn)
def apply(self):
self.item.height = float(self.height.get())
self.item.wind_speed_column = self.windSpeed.get().strip()
self.item.wind_direction_column = self.windDirection.get().strip()
self.item.upflow_column = self.upflow.get().strip()
if self.isNew:
Status.add("Rotor level created")
else:
Status.add("Rotor level updated")
class ExclusionsGridBox(DialogGridBox):
def get_headers(self):
return ["StartDate", "EndDate", "Active"]
def get_item_values(self, item):
values_dict = {}
values_dict["StartDate"] = base_dialog.convertDateToText(item.startDate)
values_dict["EndDate"] = base_dialog.convertDateToText(item.endDate)
values_dict["Active"] = item.active
return values_dict
def new_dialog(self, master, parent_dialog, item):
return ExclusionDialog(master, self.parent_dialog, item)
class FiltersGridBox(DialogGridBox):
def get_headers(self):
return ["Column","Value","FilterType","Inclusive","Active"]
def get_item_values(self, item):
values_dict = {}
values_dict["Column"] = item.column
values_dict["Value"] = item.value
values_dict["FilterType"] = item.filterType
values_dict["Inclusive"] = item.inclusive
values_dict["Active"] = item.active
return values_dict
def new_dialog(self, master, parent_dialog, item):
return FilterDialog(master, self.parent_dialog, item)
class CalibrationSectorsGridBox(DialogGridBox):
def get_headers(self):
return ["Direction","Slope","Offset","Active"]
def get_item_values(self, item):
values_dict = {}
values_dict["Direction"] = item.direction
values_dict["Slope"] = item.slope
values_dict["Offset"] = item.offset
values_dict["Active"] = item.active
return values_dict
def new_dialog(self, master, parent_dialog, item):
return CalibrationDirectionDialog(master, self.parent_dialog, item)
class ShearGridBox(DialogGridBox):
def get_headers(self):
return ["Height","WindSpeed"]
def get_item_values(self, item):
values_dict = {}
values_dict["Height"] = item.height
values_dict["WindSpeed"] = item.wind_speed_column
return values_dict
def new_dialog(self, master, parent_dialog, item):
return ShearMeasurementDialog(master, self.parent_dialog, item)
class REWSGridBox(DialogGridBox):
def get_headers(self):
return ["Height","WindSpeed", "WindDirection"]
def get_item_values(self, item):
values_dict = {}
values_dict["Height"] = item.height
values_dict["WindSpeed"] = item.wind_speed_column
values_dict["WindDirection"] = item.wind_direction_column
return values_dict
def new_dialog(self, master, parent_dialog, item):
return REWSProfileLevelDialog(master, self.parent_dialog, item)
class DatasetConfigurationDialog(base_dialog.BaseConfigurationDialog):
def getInitialFileName(self):
return "Dataset"
def addFilePath(self, master, path):
pass
def file_path_update(self, *args):
self.config.input_time_series.set_base(self.filePath.get())
def time_series_path_update(self, *args):
self.config.input_time_series.absolute_path = self.inputTimeSeriesPath.get()
def add_general(self, master, path):
self.filePath = self.addFileSaveAsEntry(master, "Configuration XML File Path:", validation.ValidateDatasetFilePath(master), path)
self.filePath.variable.trace("w", self.file_path_update)
self.name = self.addEntry(master, "Dataset Name:", validation.ValidateNotBlank(master), self.config.name)
self.inputTimeSeriesPath = self.addFileOpenEntry(master, "Input Time Series Path:", validation.ValidateTimeSeriesFilePath(master), self.config.input_time_series.absolute_path, self.filePath)
self.inputTimeSeriesPath.variable.trace("w", self.time_series_path_update)
self.separator = self.addOption(master, "Separator:", ["TAB", "COMMA", "SPACE", "SEMI-COLON"], self.config.separator)
self.separator.trace("w", self.columnSeparatorChange)
self.decimal = self.addOption(master, "Decimal Mark:", ["FULL STOP", "COMMA"], self.config.decimal)
self.decimal.trace("w", self.decimalChange)
self.headerRows = self.addEntry(master, "Header Rows:", validation.ValidateNonNegativeInteger(master), self.config.headerRows)
self.startDate = self.addDatePickerEntry(master, "Start Date:", None, self.config.startDate)
self.endDate = self.addDatePickerEntry(master, "End Date:", None, self.config.endDate)
self.hubWindSpeedMode = self.addOption(master, "Hub Wind Speed Mode:", ["None", "Calculated", "Specified"], self.config.hubWindSpeedMode)
self.hubWindSpeedMode.trace("w", self.hubWindSpeedModeChange)
self.calibrationMethod = self.addOption(master, "Calibration Method:", ["Specified", "LeastSquares"], self.config.calibrationMethod)
self.calibrationMethod.trace("w", self.calibrationMethodChange)
self.densityMode = self.addOption(master, "Density Mode:", ["Calculated", "Specified"], self.config.densityMode)
self.densityMode.trace("w", self.densityMethodChange)
def add_measurements(self, master):
self.timeStepInSeconds = self.addEntry(master, "Time Step In Seconds:", validation.ValidatePositiveInteger(master), self.config.timeStepInSeconds)
self.badData = self.addEntry(master, "Bad Data Value:", validation.ValidateFloat(master), self.config.badData)
self.dateFormat = self.addEntry(master, "Date Format:", validation.ValidateNotBlank(master), self.config.dateFormat, width = 60)
pickDateFormatButton = tk.Button(master, text=".", command = base_dialog.DateFormatPicker(self, self.dateFormat, ['%Y-%m-%d %H:%M:%S', '%Y-%m-%dT%H:%M:%S', '%d-%m-%y %H:%M', '%y-%m-%d %H:%M', '%d/%m/%Y %H:%M', '%d/%m/%Y %H:%M:%S', '%d/%m/%y %H:%M', '%y/%m/%d %H:%M']), width=5, height=1)
pickDateFormatButton.grid(row=(self.row-1), sticky=tk.E+tk.N, column=self.buttonColumn)
self.timeStamp = self.addPickerEntry(master, "Time Stamp:", validation.ValidateNotBlank(master), self.config.timeStamp, width = 60)
self.turbineLocationWindSpeed = self.addPickerEntry(master, "Turbine Location Wind Speed:", None, self.config.turbineLocationWindSpeed, width = 60) #Should this be with reference wind speed?
self.hubWindSpeed = self.addPickerEntry(master, "Hub Wind Speed:", None, self.config.hubWindSpeed, width = 60)
self.hubTurbulence = self.addPickerEntry(master, "Hub Turbulence:", None, self.config.hubTurbulence, width = 60)
self.temperature = self.addPickerEntry(master, "Temperature:", None, self.config.temperature, width = 60)
self.pressure = self.addPickerEntry(master, "Pressure:", None, self.config.pressure, width = 60)
self.density = self.addPickerEntry(master, "Density:", None, self.config.density, width = 60)
self.inflowAngle = self.addPickerEntry(master, "Inflow Angle:", None, self.config.inflowAngle, width = 60)
self.inflowAngle.setTip('Not required')
def add_power(self, master):
self.power = self.addPickerEntry(master, "Power:", None, self.config.power, width = 60)
self.powerMin = self.addPickerEntry(master, "Power Min:", None, self.config.powerMin, width = 60)
self.powerMax = self.addPickerEntry(master, "Power Max:", None, self.config.powerMax, width = 60)
self.powerSD = self.addPickerEntry(master, "Power Std Dev:", None, self.config.powerSD, width = 60)
def add_reference(self, master):
self.referenceWindSpeed = self.addPickerEntry(master, "Reference Wind Speed:", None, self.config.referenceWindSpeed, width = 60)
self.referenceWindSpeedStdDev = self.addPickerEntry(master, "Reference Wind Speed Std Dev:", None, self.config.referenceWindSpeedStdDev, width = 60)
self.referenceWindDirection = self.addPickerEntry(master, "Reference Wind Direction:", None, self.config.referenceWindDirection, width = 60)
self.referenceWindDirectionOffset = self.addEntry(master, "Reference Wind Direction Offset:", validation.ValidateFloat(master), self.config.referenceWindDirectionOffset)
def add_reference_shear(self, master):
self.shearCalibrationMethod = self.addOption(master, "Shear Calibration Method:", ["None", "LeastSquares"], self.config.shearCalibrationMethod)
self.row += 1
label = tk.Label(master, text="Reference Shear Heights (Power Law):")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.referenceShearGridBox = ShearGridBox(master, self, self.row, self.inputColumn)
self.referenceShearGridBox.add_items(self.config.referenceShearMeasurements)
self.copyToREWSButton = tk.Button(master, text="Copy To REWS", command = self.copyToREWSShearProfileLevels, width=12, height=1)
self.copyToREWSButton.grid(row=self.row, sticky=tk.E+tk.N, column=self.buttonColumn)
def add_turbine_shear(self, master):
label = tk.Label(master, text="Turbine Shear Heights (Power Law):")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.turbineShearGridBox = ShearGridBox(master, self, self.row, self.inputColumn)
self.turbineShearGridBox.add_items(self.config.turbineShearMeasurements)
def add_rews(self, master):
self.addTitleRow(master, "REWS Settings:")
self.rewsDefined = self.addCheckBox(master, "REWS Active", self.config.rewsDefined)
self.numberOfRotorLevels = self.addEntry(master, "REWS Number of Rotor Levels:", validation.ValidateNonNegativeInteger(master), self.config.numberOfRotorLevels)
self.rotorMode = self.addOption(master, "REWS Rotor Mode:", ["EvenlySpacedLevels", "ProfileLevels"], self.config.rotorMode)
self.hubMode = self.addOption(master, "Hub Mode:", ["Interpolated", "PiecewiseExponent"], self.config.hubMode)
label = tk.Label(master, text="REWS Profile Levels:")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.rewsGridBox = REWSGridBox(master, self, self.row, self.inputColumn)
self.rewsGridBox.add_items(self.config.rewsProfileLevels)
self.copyToShearButton = tk.Button(master, text="Copy To Shear", command = self.copyToShearREWSProfileLevels, width=12, height=1)
self.copyToShearButton.grid(row=self.row, sticky=tk.E+tk.N, column=self.buttonColumn)
def add_specified_calibration(self, master):
label = tk.Label(master, text="Calibration Sectors:")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.calibrationSectorsGridBox = CalibrationSectorsGridBox(master, self, self.row, self.inputColumn)
self.calibrationSectorsGridBox.add_items(self.config.calibrationSectors)
def add_calculated_calibration(self, master):
self.calibrationStartDate = self.addDatePickerEntry(master, "Calibration Start Date:", None, self.config.calibrationStartDate)
self.calibrationEndDate = self.addDatePickerEntry(master, "Calibration End Date:", None, self.config.calibrationEndDate)
self.siteCalibrationNumberOfSectors = self.addEntry(master, "Number of Sectors:", None, self.config.siteCalibrationNumberOfSectors)
self.siteCalibrationCenterOfFirstSector = self.addEntry(master, "Center of First Sector:", None, self.config.siteCalibrationCenterOfFirstSector)
label = tk.Label(master, text="Calibration Filters:")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.calibrationFiltersGridBox = FiltersGridBox(master, self, self.row, self.inputColumn)
self.calibrationFiltersGridBox.add_items(self.config.calibrationFilters)
def add_exclusions(self, master):
#Exclusions
label = tk.Label(master, text="Exclusions:")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.exclusionsGridBox = ExclusionsGridBox(master, self, self.row, self.inputColumn)
self.exclusionsGridBox.add_items(self.config.exclusions)
def add_filters(self, master):
#Filters
label = tk.Label(master, text="Filters:")
label.grid(row=self.row, sticky=tk.W, column=self.titleColumn, columnspan = 2)
self.row += 1
self.filtersGridBox = FiltersGridBox(master, self, self.row, self.inputColumn)
self.filtersGridBox.add_items(self.config.filters)
def add_meta_data(self, master):
self.data_type = self.addOption(master, "Data Type:", ["Mast", "LiDAR", "SoDAR", "Mast & LiDAR", "Mast & SoDAR", "Spinner"], self.config.data_type)
self.outline_site_classification = self.addOption(master, "Outline Site Classification:", ["Flat", "Complex", "Offshore"], self.config.outline_site_classification)
self.outline_forestry_classification = self.addOption(master, "Outline Forestry Classification:", ["Forested", "Non-Forested"], self.config.outline_forestry_classification)
#IEC Site Classification - as described by Annex B of IEC 61400-12-1 (2006)
self.iec_terrain_classification = self.addEntry(master, "IEC Terrain Classification:", None, self.config.iec_terrain_classification)
self.latitude = self.addEntry(master, "Latitude:", validation.ValidateOptionalFloat(master), self.config.latitude)
self.longitude = self.addEntry(master, "Longitude:", validation.ValidateOptionalFloat(master), self.config.longitude)
self.continent = self.addOption(master, "Continent:", ["Europe", "North America", "Asia", "South America", "Africa", "Antarctica"], self.config.continent)
self.country = self.addOption(master, "Country:", self.get_countries(), self.config.country)
self.elevation_above_sea_level = self.addEntry(master, "Elevation Above Sea Level:", validation.ValidateOptionalFloat(master), self.config.elevation_above_sea_level)
self.measurement_compliance = self.addOption(master, "Measurement Compliance:", ["IEC 61400-12-1 (2006) Compliant", "None", "Unknown"], self.config.measurement_compliance)
self.anemometry_type = self.addOption(master, "Anemometry Type:", ["Sonic", "Cups", "Spinner", "Not Applicable"], self.config.anemometry_type)
self.anemometry_heating = self.addOption(master, "Anemometry Heating:", ["Heated", "Unheated", "Unknown", "Not Applicable"], self.config.anemometry_heating)
self.turbulence_measurement_type = self.addOption(master, "Turbulence Measurement Type:", ["LiDAR", "SoDAR", "Cups", "Sonic"], self.config.turbulence_measurement_type)
self.power_measurement_type = self.addOption(master, "Power Measurement Type:", ["Transducer", "SCADA", "Unknown"], self.config.power_measurement_type)
self.turbine_control_type = self.addOption(master, "Turbine Control Type:", ["Pitch", "Stall", "Active Stall"], self.config.turbine_control_type)
self.turbine_technology_vintage = self.addEntry(master, "Turbine Technology Vintage:", validation.ValidateOptionalPositiveInteger(master), self.config.turbine_technology_vintage)
self.time_zone = self.addOption(master, "Time Zone:", ["Local", "UTC"], self.config.time_zone)
def get_countries(self):
return ["Åland Islands","Albania","Algeria","American Samoa","Andorra","Angola","Anguilla","Antarctica","Antigua and Barbuda","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia and Herzegovina","Botswana","Bouvet Island","Brazil","British Indian Ocean Territory","Brunei Darussalam","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Canada","Cape Verde","Caribbean Netherlands ","Cayman Islands","Central African Republic","Chad","Chile","China","Christmas Island","Cocos (Keeling) Islands","Colombia","Comoros","Congo","Congo, Democratic Republic of","Cook Islands","Costa Rica","Côte d'Ivoire","Croatia","Cuba","Curaçao","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","English Name","Equatorial Guinea","Eritrea","Estonia","Ethiopia","Falkland Islands","Faroe Islands","Fiji","Finland","France","French Guiana","French Polynesia","French Southern Territories","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guadeloupe","Guam","Guatemala","Guernsey","Guinea","Guinea-Bissau","Guyana","Haiti","Heard and McDonald Islands","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kiribati","Kuwait","Kyrgyzstan","Lao People's Democratic Republic","Latvia","Lebanon","Lesotho","Liberia","Libya","Liechtenstein","Lithuania","Luxembourg","Macau","Macedonia","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Marshall Islands","Martinique","Mauritania","Mauritius","Mayotte","Mexico","Micronesia, Federated States of","Moldova","Monaco","Mongolia","Montenegro","Montserrat","Morocco","Mozambique","Myanmar","Namibia","Nauru","Nepal","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Niue","Norfolk Island","North Korea","Northern Mariana Islands","Norway","Oman","Pakistan","Palau","Palestine, State of","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Pitcairn","Poland","Portugal","Puerto Rico","Qatar","Réunion","Romania","Russian Federation","Rwanda","Saint Barthélemy","Saint Helena","Saint Kitts and Nevis","Saint Lucia","Saint Vincent and the Grenadines","Saint-Martin (France)","Samoa","San Marino","Sao Tome and Principe","Saudi Arabia","Senegal","Serbia","Seychelles","Sierra Leone","Singapore","Sint Maarten (Dutch part)","Slovakia","Slovenia","Solomon Islands","Somalia","South Africa","South Georgia and the South Sandwich Islands","South Korea","South Sudan","Spain","Sri Lanka","St. Pierre and Miquelon","Sudan","Suriname","Svalbard and Jan Mayen Islands","Swaziland","Sweden","Switzerland","Syria","Taiwan","Tajikistan","Tanzania","Thailand","The Netherlands","Timor-Leste","Togo","Tokelau","Tonga","Trinidad and Tobago","Tunisia","Turkey","Turkmenistan","Turks and Caicos Islands","Tuvalu","Uganda","Ukraine","United Arab Emirates","United Kingdom","United States","United States Minor Outlying Islands","Uruguay","Uzbekistan","Vanuatu","Vatican","Venezuela","Vietnam","Virgin Islands (British)","Virgin Islands (U.S.)","Wallis and Futuna Islands","Western Sahara","Yemen","Zambia","Zimbabwe"]
def add_turbine(self, master):
self.cutInWindSpeed = self.addEntry(master, "Cut In Wind Speed:", validation.ValidatePositiveFloat(master), self.config.cutInWindSpeed)
self.cutOutWindSpeed = self.addEntry(master, "Cut Out Wind Speed:", validation.ValidatePositiveFloat(master), self.config.cutOutWindSpeed)
self.ratedPower = self.addEntry(master, "Rated Power:", validation.ValidatePositiveFloat(master), self.config.ratedPower)
self.hubHeight = self.addEntry(master, "Hub Height:", validation.ValidatePositiveFloat(master), self.config.hubHeight)
self.diameter = self.addEntry(master, "Diameter:", validation.ValidatePositiveFloat(master), self.config.diameter)
def addFormElements(self, master, path):
self.availableColumnsFile = None
self.columnsFileHeaderRows = None
self.availableColumns = []
self.shearWindSpeedHeights = []
self.shearWindSpeeds = []
nb = ttk.Notebook(master, height=400)
nb.pressed_index = None
general_tab = tk.Frame(nb)
turbines_tab = tk.Frame(nb)
measurements_tab = tk.Frame(nb)
power_tab = tk.Frame(nb)
reference_tab = tk.Frame(nb)
reference_shear_tab = tk.Frame(nb)
turbine_shear_tab = tk.Frame(nb)
rews_tab = tk.Frame(nb)
calculated_calibration_tab = tk.Frame(nb)
specified_calibration_tab = tk.Frame(nb)
exclusions_tab = tk.Frame(nb)
filters_tab = tk.Frame(nb)
meta_data_tab = tk.Frame(nb)
nb.add(general_tab, text='General', padding=3)
nb.add(turbines_tab, text='Turbine', padding=3)
nb.add(measurements_tab, text='Measurements', padding=3)
nb.add(power_tab, text='Power', padding=3)
nb.add(reference_tab, text='Reference', padding=3)
nb.add(reference_shear_tab, text='Reference Shear', padding=3)
nb.add(turbine_shear_tab, text='Turbine Shear', padding=3)
nb.add(rews_tab, text='REWS', padding=3)
nb.add(calculated_calibration_tab, text='Calibration (Calculated)', padding=3)
nb.add(specified_calibration_tab, text='Calibration (Specified)', padding=3)
nb.add(exclusions_tab, text='Exclusions', padding=3)
nb.add(filters_tab, text='Filters', padding=3)
nb.add(meta_data_tab, text='Meta Data', padding=3)
nb.grid(row=self.row, sticky=tk.E+tk.W+tk.N+tk.S, column=self.titleColumn, columnspan=8)
master.grid_rowconfigure(self.row, weight=1)
self.row += 1
self.add_general(general_tab, path)
self.add_turbine(turbines_tab)
self.add_measurements(measurements_tab)
self.add_power(power_tab)
self.add_reference(reference_tab)
self.add_reference_shear(reference_shear_tab)
self.add_turbine_shear(turbine_shear_tab)
self.add_rews(rews_tab)
self.add_calculated_calibration(calculated_calibration_tab)
self.add_specified_calibration(specified_calibration_tab)
self.add_exclusions(exclusions_tab)
self.add_filters(filters_tab)
self.add_meta_data(meta_data_tab)
self.calibrationMethodChange()
self.densityMethodChange()
def densityMethodChange(self, *args):
if self.densityMode.get() == "Specified":
densityModeSpecifiedComment = "Not required when density mode is set to specified"
self.temperature.setTip(densityModeSpecifiedComment)
self.pressure.setTip(densityModeSpecifiedComment)
self.density.clearTip()
elif self.densityMode.get() == "Calculated":
densityModeCalculatedComment = "Not required when density mode is set to calculated"
self.temperature.clearTip()
self.pressure.clearTip()
self.density.setTip(densityModeCalculatedComment)
elif self.densityMode.get() == "None":
densityModeNoneComment = "Not required when density mode is set to none"
self.temperature.setTip(densityModeNoneComment)
self.pressure.setTip(densityModeNoneComment)
self.density.setTip(densityModeNoneComment)
else:
raise Exception("Unknown density methods: %s" % self.densityMode.get())
def columnSeparatorChange(self, *args):
Status.add('reading separator', verbosity=2)
sep = getSeparatorValue(self.separator.get())
self.read_dataset()
return sep
def decimalChange(self, *args):
Status.add('reading decimal', verbosity=2)
decimal = getDecimalValue(self.decimal.get())
self.read_dataset()
return decimal
def hubWindSpeedModeChange(self, *args):
self.calibrationMethodChange()
def calibrationMethodChange(self, *args):
if self.hubWindSpeedMode.get() == "Calculated":
hubWindSpeedModeCalculatedComment = "Not required for calculated hub wind speed mode"
specifiedCalibrationMethodComment = "Not required for Specified Calibration Method"
leastSquaresCalibrationMethodComment = "Not required for Least Squares Calibration Method"
self.hubWindSpeed.setTip(hubWindSpeedModeCalculatedComment)
self.hubTurbulence.setTip(hubWindSpeedModeCalculatedComment)
self.siteCalibrationNumberOfSectors.clearTip()
self.siteCalibrationCenterOfFirstSector.clearTip()
self.referenceWindSpeed.clearTip()
self.referenceWindSpeedStdDev.clearTip()
self.referenceWindDirection.clearTip()
self.referenceWindDirectionOffset.clearTip()
if self.calibrationMethod.get() in ("LeastSquares", "York"):
self.turbineLocationWindSpeed.clearTip()
self.calibrationStartDate.clearTip()
self.calibrationEndDate.clearTip()
self.calibrationSectorsGridBox.setTip(leastSquaresCalibrationMethodComment)
self.calibrationFiltersGridBox.clearTip()
elif self.calibrationMethod.get() == "Specified":
self.turbineLocationWindSpeed.setTipNotRequired()
self.calibrationStartDate.setTipNotRequired()
self.calibrationEndDate.setTipNotRequired()
self.calibrationSectorsGridBox.clearTip()
self.calibrationFiltersGridBox.setTip(specifiedCalibrationMethodComment)
else:
if len(self.calibrationMethod.get()) > 0:
raise Exception("Unknown calibration method: %s" % self.calibrationMethod.get())
elif self.hubWindSpeedMode.get() == "Specified":
hubWindSpeedModeSpecifiedComment = "Not required for specified hub wind speed mode"
self.hubWindSpeed.clearTip()
self.hubTurbulence.clearTip()
self.turbineLocationWindSpeed.setTip(hubWindSpeedModeSpecifiedComment)
self.calibrationStartDate.setTip(hubWindSpeedModeSpecifiedComment)
self.calibrationEndDate.setTip(hubWindSpeedModeSpecifiedComment)
self.siteCalibrationNumberOfSectors.setTip(hubWindSpeedModeSpecifiedComment)
self.siteCalibrationCenterOfFirstSector.setTip(hubWindSpeedModeSpecifiedComment)
self.referenceWindSpeed.setTip(hubWindSpeedModeSpecifiedComment)
self.referenceWindSpeedStdDev.setTip(hubWindSpeedModeSpecifiedComment)
self.referenceWindDirection.setTip(hubWindSpeedModeSpecifiedComment)
self.referenceWindDirectionOffset.setTip(hubWindSpeedModeSpecifiedComment)
elif self.hubWindSpeedMode.get() == "None":
hubWindSpeedModeNoneComment = "Not required when hub wind speed mode is set to none"
self.hubWindSpeed.setTip(hubWindSpeedModeNoneComment)
self.hubTurbulence.setTip(hubWindSpeedModeNoneComment)
self.turbineLocationWindSpeed.setTip(hubWindSpeedModeNoneComment)
self.calibrationStartDate.setTip(hubWindSpeedModeNoneComment)
self.calibrationEndDate.setTip(hubWindSpeedModeNoneComment)
self.siteCalibrationNumberOfSectors.setTip(hubWindSpeedModeNoneComment)
self.siteCalibrationCenterOfFirstSector.setTip(hubWindSpeedModeNoneComment)
self.referenceWindSpeed.setTip(hubWindSpeedModeNoneComment)
self.referenceWindSpeedStdDev.setTip(hubWindSpeedModeNoneComment)
self.referenceWindDirection.setTip(hubWindSpeedModeNoneComment)
self.referenceWindDirectionOffset.setTip(hubWindSpeedModeNoneComment)
else:
raise Exception("Unknown hub wind speed mode: %s" % self.hubWindSpeedMode.get())
def copyToREWSShearProfileLevels(self):
self.rewsGridBox.remove_all()
for item in self.referenceShearGridBox.get_items():
self.rewsGridBox.add_item(ShearMeasurement(item.height, item.wind_speed_column))
def copyToShearREWSProfileLevels(self):
self.referenceShearGridBox.remove_all()
for item in self.rewsGridBox.get_items():
self.referenceShearGridBox.add_item(ShearMeasurement(item.height, item.wind_speed_column))
def getHeaderRows(self):
headerRowsText = self.headerRows.get()
if len(headerRowsText) > 0:
return int(headerRowsText)
else:
return 0
def ShowColumnPicker(self, parentDialog, pick, selectedColumn):
if self.config.input_time_series.absolute_path is None:
tkMessageBox.showwarning(
"InputTimeSeriesPath Not Set",
"You must set the InputTimeSeriesPath before using the ColumnPicker"
)
return
inputTimeSeriesPath = self.config.input_time_series.absolute_path
headerRows = self.getHeaderRows()
if self.columnsFileHeaderRows != headerRows or self.availableColumnsFile != inputTimeSeriesPath:
try:
self.read_dataset()
except ExceptionHandler.ExceptionType as e:
tkMessageBox.showwarning(
"Column header error",
"It was not possible to read column headers using the provided inputs.\rPlease check and amend 'Input Time Series Path' and/or 'Header Rows'.\r"
)
ExceptionHandler.add(e, "ERROR reading columns from {0}".format(inputTimeSeriesPath))
self.columnsFileHeaderRows = headerRows
self.availableColumnsFile = inputTimeSeriesPath
try:
base_dialog.ColumnPickerDialog(parentDialog, pick, self.availableColumns, selectedColumn)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Error picking column")
def read_dataset(self):
Status.add('reading dataSet', verbosity=2)
inputTimeSeriesPath = self.config.input_time_series.absolute_path
headerRows = self.getHeaderRows()
dataFrame = pd.read_csv(inputTimeSeriesPath, sep = getSeparatorValue(self.separator.get()), skiprows = headerRows, decimal = getDecimalValue(self.decimal.get()))
self.availableColumns = []
for col in dataFrame:
self.availableColumns.append(col)
def setConfigValues(self):
self.config.name = self.name.get()
self.config.startDate = base_dialog.getDateFromEntry(self.startDate)
self.config.endDate = base_dialog.getDateFromEntry(self.endDate)
self.config.hubWindSpeedMode = self.hubWindSpeedMode.get()
self.config.calibrationMethod = self.calibrationMethod.get()
self.config.densityMode = self.densityMode.get()
self.config.input_time_series.absolute_path = self.inputTimeSeriesPath.get()
self.config.timeStepInSeconds = int(self.timeStepInSeconds.get())
self.config.badData = float(self.badData.get())
self.config.dateFormat = self.dateFormat.get()
self.config.separator = self.separator.get()
self.config.decimal = self.decimal.get()
self.config.headerRows = self.getHeaderRows()
self.config.timeStamp = self.timeStamp.get()
self.config.power = self.power.get()
self.config.powerMin = self.powerMin.get()
self.config.powerMax = self.powerMax.get()
self.config.powerSD = self.powerSD.get()
self.config.referenceWindSpeed = self.referenceWindSpeed.get()
self.config.referenceWindSpeedStdDev = self.referenceWindSpeedStdDev.get()
self.config.referenceWindDirection = self.referenceWindDirection.get()
self.config.referenceWindDirectionOffset = base_dialog.floatSafe(self.referenceWindDirectionOffset.get())
self.config.turbineLocationWindSpeed = self.turbineLocationWindSpeed.get()
self.config.inflowAngle = self.inflowAngle.get()
self.config.temperature = self.temperature.get()
self.config.pressure = self.pressure.get()
self.config.density = self.density.get()
self.config.hubWindSpeed = self.hubWindSpeed.get()
self.config.hubTurbulence = self.hubTurbulence.get()
#REWS
self.config.rewsDefined = bool(self.rewsDefined.get())
self.config.numberOfRotorLevels = base_dialog.intSafe(self.numberOfRotorLevels.get())
self.config.rotorMode = self.rotorMode.get()
self.config.hubMode = self.hubMode.get()
self.config.rewsProfileLevels = self.rewsGridBox.get_items()
#shear measurements
self.config.referenceShearMeasurements = self.referenceShearGridBox.get_items()
self.config.turbineShearMeasurements = self.turbineShearGridBox.get_items()
self.config.shearCalibrationMethod = self.shearCalibrationMethod.get()
#calibrations
self.config.calibrationStartDate = base_dialog.getDateFromEntry(self.calibrationStartDate)
self.config.calibrationEndDate = base_dialog.getDateFromEntry(self.calibrationEndDate)
self.config.siteCalibrationNumberOfSectors = base_dialog.intSafe(self.siteCalibrationNumberOfSectors.get())
self.config.siteCalibrationCenterOfFirstSector = base_dialog.intSafe(self.siteCalibrationCenterOfFirstSector.get())
#calibration sectors
self.config.calibrationSectors = self.calibrationSectorsGridBox.get_items()
#calibration filters
self.config.calibrationFilters = self.calibrationFiltersGridBox.get_items()
#exclusions
self.config.exclusions = self.exclusionsGridBox.get_items()
#filters
self.config.filters = self.filtersGridBox.get_items()
#turbines
self.config.cutInWindSpeed = float(self.cutInWindSpeed.get())
self.config.cutOutWindSpeed = float(self.cutOutWindSpeed.get())
self.config.ratedPower = float(self.ratedPower.get())
self.config.hubHeight = float(self.hubHeight.get())
self.config.diameter = float(self.diameter.get())
#meta data
self.config.data_type = self.data_type.get()
self.config.outline_site_classification = self.outline_site_classification.get()
self.config.outline_forestry_classification = self.outline_forestry_classification.get()
self.config.iec_terrain_classification = self.iec_terrain_classification.get()
if len(self.latitude.get()) > 0:
self.config.latitude = float(self.latitude.get())
else:
self.config.latitude = None
if len(self.longitude.get()) > 0:
self.config.longitude = float(self.longitude.get())
else:
self.config.longitude = None
self.config.continent = self.continent.get()
self.config.country = self.country.get()
if len(self.elevation_above_sea_level.get()) > 0:
self.config.elevation_above_sea_level = float(self.elevation_above_sea_level.get())
else:
self.config.elevation_above_sea_level = None
self.config.measurement_compliance = self.measurement_compliance.get()
self.config.anemometry_type = self.anemometry_type.get()
self.config.anemometry_heating = self.anemometry_heating.get()
self.config.turbulence_measurement_type = self.turbulence_measurement_type.get()
self.config.power_measurement_type = self.power_measurement_type.get()
self.config.turbine_control_type = self.turbine_control_type.get()
if len(self.turbine_technology_vintage.get()) > 0:
self.config.turbine_technology_vintage = int(self.turbine_technology_vintage.get())
else:
self.config.turbine_technology_vintage = None
self.config.time_zone = self.time_zone.get()
class DatasetGridBox(GridBox):
def __init__(self, master, parent_dialog, row, column, datasets_file_manager):
self.parent_dialog = parent_dialog
headers = ["Dataset", "Exists"]
GridBox.__init__(self, master, headers, row, column)
self.pop_menu.add_command(label="Add Existing", command=self.add)
self.pop_menu_add.add_command(label="Add Existing", command=self.add)
self.datasets_file_manager = datasets_file_manager
self.add_items(self.datasets_file_manager)
def size(self):
return self.item_count()
def get(self, index):
return self.get_items()[index].display_path
def get_item_values(self, item):
values_dict = {}
values_dict["Dataset"] = item.display_path
values_dict["Exists"] = os.path.isfile(item.absolute_path)
return values_dict
def get_header_scale(self):
return 10
def new(self):
try:
config = DatasetConfiguration()
DatasetConfigurationDialog(self.master, self.add_from_file_path, config)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR creating dataset config")
def add(self):
preferences = Preferences.get()
file_name = tkFileDialog.askopenfilename(parent=self.master, initialdir=preferences.dataset_last_opened_dir(), defaultextension=".xml")
if len(file_name) > 0: self.add_from_file_path(file_name)
def add_from_file_path(self, path):
try:
preferences = Preferences.get()
preferences.datasetLastOpened = path
preferences.save()
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Cannot save preferences")
dataset = self.datasets_file_manager.append_absolute(path)
self.add_item(dataset)
self.parent_dialog.validate_datasets.validate()
def edit_item(self, item):
try:
datasetConfig = DatasetConfiguration(item.absolute_path)
DatasetConfigurationDialog(self.master, None, datasetConfig, None)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR editing")
def remove(self):
selected = self.get_selected()
self.datasets_file_manager.remove(selected)
GridBox.remove(self)
self.parent_dialog.validate_datasets.validate()
|
mit
|
DGrady/pandas
|
pandas/tests/plotting/test_datetimelike.py
|
2
|
51386
|
""" Test cases for time series specific (freq conversion, etc) """
from datetime import datetime, timedelta, date, time
import pytest
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame, NaT
from pandas.compat import is_platform_mac
from pandas.core.indexes.datetimes import date_range, bdate_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tseries.offsets import DateOffset
from pandas.core.indexes.period import period_range, Period, PeriodIndex
from pandas.core.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
from pandas.tests.plotting.common import (TestPlotBase,
_skip_if_no_scipy_gaussian_kde)
tm._skip_if_no_mpl()
class TestTSPlot(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def teardown_method(self, method):
tm.close()
@pytest.mark.slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
df = DataFrame(np.random.randn(10, 9), index=range(10))
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
assert label.get_fontsize() == 2
@pytest.mark.slow
def test_frame_inferred(self):
# inferred freq
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
fig, ax = self.plt.subplots()
df.plot(ax=ax) # it works
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
pytest.raises(TypeError, df['A'].plot)
@pytest.mark.slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, self.plt.Axes.plot, *args, **kwds)
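# 's' is resolved lazily at call time, so each call in the loops below uses
# the series currently bound to 's'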
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
_, ax = self.plt.subplots()
ts.plot(style='k', ax=ax)
color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
pytest.raises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
pytest.raises(ValueError, s.plot, style='b-', color='#000099')
@pytest.mark.slow
def test_high_freq(self):
freqs = ['ms', 'us']
for freq in freqs:
_, ax = self.plt.subplots()
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot, ax=ax)
def test_get_datevalue(self):
from pandas.plotting._converter import get_datevalue
assert get_datevalue(None, 'D') is None
assert get_datevalue(1987, 'A') == 1987
assert (get_datevalue(Period(1987, 'A'), 'M') ==
Period('1987-12', 'M').ordinal)
assert (get_datevalue('1/1/1987', 'D') ==
Period('1987-1-1', 'D').ordinal)
@pytest.mark.slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
assert expected_string == ax.format_coord(first_x, first_y)
except ValueError:
pytest.skip("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
_, ax = self.plt.subplots()
annual.plot(ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
daily.plot(ax=ax)
check_format_of_first_point(ax,
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
tsplot(annual, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
tsplot(daily, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014-01-01 y = 1.000000')
@pytest.mark.slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@pytest.mark.slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@pytest.mark.slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@pytest.mark.slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@pytest.mark.slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ts.plot(ax=ax)
assert not hasattr(ax, 'freq')
@pytest.mark.slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_uhf(self):
import pandas.plotting._converter as conv
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
assert xp == rs
@pytest.mark.slow
def test_irreg_hf(self):
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.iloc[[0, 1, 3, 4]]
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.asobject
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
_, ax = self.plt.subplots()
ret = ser.plot(ax=ax)
assert ret is not None
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
assert rs == xp
def test_business_freq(self):
bts = tm.makePeriodSeries()
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'B'
@pytest.mark.slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'M'
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
rs = ax.get_lines()[0].get_xdata()
assert not Index(rs).is_normalized
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@pytest.mark.slow
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert result[0] == xlim[0] - 5
assert result[1] == xlim[1] + 10
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
# datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
fig = ax.get_figure()
self.plt.close(fig)
ser = tm.makeTimeSeries()
_, ax = self.plt.subplots()
ser.plot(ax=ax)
_test(ax)
_, ax = self.plt.subplots()
df = DataFrame({'a': ser, 'b': ser + 1})
df.plot(ax=ax)
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.plotting._converter as conv
assert conv.get_finder('B') == conv._daily_finder
assert conv.get_finder('D') == conv._daily_finder
assert conv.get_finder('M') == conv._monthly_finder
assert conv.get_finder('Q') == conv._quarterly_finder
assert conv.get_finder('A') == conv._annual_finder
assert conv.get_finder('W') == conv._daily_finder
@pytest.mark.slow
def test_finder_daily(self):
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_quarterly(self):
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_monthly(self):
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
assert rs == xp
@pytest.mark.slow
def test_finder_annual(self):
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == Period(xp[i], freq='A').ordinal
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
assert rs == xp
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
assert rs == xp
@pytest.mark.slow
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
_, ax = self.plt.subplots()
ts.plot(ax=ax)
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
self.plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
self.plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
_, ax = self.plt.subplots()
ser.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
@pytest.mark.slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
_, ax = self.plt.subplots()
low.plot(ax=ax)
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
@pytest.mark.slow
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
assert not hasattr(ax, 'left_ax')
assert hasattr(ax, 'right_ax')
assert hasattr(ax2, 'left_ax')
assert not hasattr(ax2, 'right_ax')
@pytest.mark.slow
def test_secondary_y_ts(self):
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
@pytest.mark.slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ax = ser.plot(secondary_y=True, kind='density', ax=ax)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ser.plot(secondary_y=True, kind='bar', ax=ax)
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
def test_mixed_freq_regular_first(self):
# TODO
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left == pidx[0].ordinal
assert right == pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left == pidx[0].ordinal
assert right == pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
ts2.plot(style='r', ax=ax)
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
@pytest.mark.slow
def test_mixed_freq_lf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(legend=True, ax=ax)
high.plot(legend=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
leg = ax.get_legend()
assert len(leg.texts) == 2
self.plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'T'
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
ps.plot(ax=ax)
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.freq == 'M'
assert ax2.freq == 'M'
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# assert (ax1.lines[0].get_xydata()[0, 0] ==
# ax2.lines[0].get_xydata()[0, 0])
def test_nat_handling(self):
_, ax = self.plt.subplots()
dti = DatetimeIndex(['2015-01-01', NaT, '2015-01-03'])
s = Series(range(len(dti)), dti)
s.plot(ax=ax)
xdata = ax.get_lines()[0].get_xdata()
# plot x data is bounded by index values
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
@pytest.mark.slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
tsplot(high, self.plt.Axes.plot, ax=ax)
lines = tsplot(low, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
@pytest.mark.slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
tsplot(low, self.plt.Axes.plot, ax=ax)
lines = tsplot(high, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
@pytest.mark.slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
low.plot(kind=kind1, stacked=True, ax=ax)
high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
high.plot(kind=kind1, stacked=True, ax=ax)
low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
@pytest.mark.slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
tm.close()
# low to high
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
@pytest.mark.slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
@pytest.mark.slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
_, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
assert xp == rs
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
assert xp == rs
@pytest.mark.slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
# TODO: unused?
# us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
assert xp == rs
@pytest.mark.slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
ax = high.plot(secondary_y=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
for l in ax.left_ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B (right)'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=['A', 'C'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
# non-ts
df = tm.makeDataFrame()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
assert l.get_rotation() == 30
@pytest.mark.slow
def test_ax_plot(self):
x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
y = lrange(len(x))
_, ax = self.plt.subplots()
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@pytest.mark.slow
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@pytest.mark.slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
assert left == ts_irregular.index.min().toordinal()
assert right == ts_irregular.index.max().toordinal()
@pytest.mark.slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before == left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before == left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
_, ax = self.plt.subplots()
ts.plot(ax=ax)
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
assert left_before == left_after
assert right_before == right_after
@pytest.mark.slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
assert left == ts_irregular.index.min().toordinal()
assert right == ts_irregular.index.max().toordinal()
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
_, ax = self.plt.subplots()
ax.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
ax.plot(values)
def test_format_timedelta_ticks_narrow(self):
if is_platform_mac():
pytest.skip("skip on mac for precision display issue on older mpl")
expected_labels = [
'00:00:00.00000000{:d}'.format(i)
for i in range(10)]
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
def test_format_timedelta_ticks_wide(self):
if is_platform_mac():
pytest.skip("skip on mac for precision display issue on older mpl")
expected_labels = [
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
'3 days 11:20:00',
'4 days 15:06:40',
'5 days 18:53:20',
'6 days 22:40:00',
'8 days 02:26:40',
''
]
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
def test_timedelta_plot(self):
# test issue #8711
s = Series(range(5), timedelta_range('1day', periods=5))
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test long period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 d')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test short period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 ns')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
def test_hist(self):
# https://github.com/matplotlib/matplotlib/issues/8459
rng = date_range('1/1/2011', periods=10, freq='H')
x = rng
w1 = np.arange(0, 1, .1)
w2 = np.arange(0, 1, .1)[::-1]
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
@pytest.mark.slow
def test_overlapping_datetime(self):
        # GH 6608
s1 = Series([1, 2, 3], index=[datetime(1995, 12, 31),
datetime(2000, 12, 31),
datetime(2005, 12, 31)])
s2 = Series([1, 2, 3], index=[datetime(1997, 12, 31),
datetime(2003, 12, 31),
datetime(2008, 12, 31)])
# plot first series, then add the second series to those axes,
# then try adding the first series again
_, ax = self.plt.subplots()
s1.plot(ax=ax)
s2.plot(ax=ax)
s1.plot(ax=ax)
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
|
bsd-3-clause
|
jottenlips/aima-python
|
submissions/aartiste/myKMeans.py
|
13
|
4852
|
from sklearn.cluster import KMeans
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
A classifier sorts data points into bins,
so the best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Make a custom KMeans clusterer.
'''
km = KMeans(
n_clusters=2,
# max_iter=300,
# n_init=10,
# init='k-means++',
# algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=0,
# copy_x=True,
)
Examples = {
'Trump': {
'frame': trumpScaled,
},
'TrumpCustom': {
'frame': trumpScaled,
'kmeans': km
},
}
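# Illustrative sketch (not part of the original submission): one way the frames
# and the KMeans instance defined above might be exercised. fit_and_score is a
# hypothetical helper name, not something the course framework requires.
def fit_and_score(frame, model):
    model.fit(frame.data)
    labels = model.labels_
    # Cluster ids are arbitrary, so score the better of the two possible
    # label-to-target assignments.
    agree = sum(1 for lab, tgt in zip(labels, frame.target) if lab == tgt)
    return max(agree, len(frame.target) - agree) / float(len(frame.target))
# Example use (left commented out so importing the module stays side-effect free):
# print(fit_and_score(trumpScaled, km))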
|
mit
|
crichardson17/starburst_atlas
|
Low_resolution_sims/Dusty_LowRes/Geneva_cont_NoRot/Geneva_cont_NoRot_4/fullgrid/Optical2.py
|
30
|
9412
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
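# Illustrative aside (not part of the original script): how the Rbf call used
# inside add_sub_plot behaves on a tiny made-up data set.
def _rbf_example():
	demo = scipy.interpolate.Rbf([0., 1., 2.], [0., 1., 0.], [1.0, 2.0, 1.5], function='linear')
	return demo(0.5, 0.5)  # interpolated value at a single new (x, y) point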
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
	for j in range(len(Emissionlines[0])):
		if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
			concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
		else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
	add_sub_plot(i+1)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Optical_lines_cntd.pdf')
plt.clf()
print "figure saved"
|
gpl-2.0
|
mdw771/tomosim
|
project.py
|
1
|
4894
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import copy
import glob
import os
import numpy as np
import tomopy
import dxchange  # used by calculate_snr below; assumed available alongside tomopy
import matplotlib.pyplot as plt
from simulator import *
from util import *
class Project(object):
def __init__(self):
self.simulators = []
self.downsample = [1]
self.dose_local = None
self.dose_tomosaic = None
def add_simuators(self, fname, instrument, type='tiff', center=None, pixel_size=1, downsample=None, fin_angle=180,
max_count=None, **kwargs):
sim = Simulator()
sim.read_raw_sinogram(fname, type=type, center=center, pixel_size=pixel_size, fin_angle=fin_angle,
max_count=max_count, **kwargs)
sim.load_instrument(instrument)
sim.ds = 1
sim.name_ds = '{:d}'.format(sim.ds) if isinstance(sim.ds, int) else '{:.2f}'.format(sim.ds)
self.simulators.append(sim)
if downsample is not None:
for ds in downsample:
sim = copy.deepcopy(self.simulators[0])
temp = downsample_img(sim.raw_sino.sinogram[:, np.newaxis, :], ds, axis=0)
sim.raw_sino.sinogram = np.squeeze(temp)
sim.raw_sino.shape = sim.raw_sino.sinogram.shape
sim.ds = ds
sim.name_ds = '{:d}'.format(sim.ds) if isinstance(sim.ds, int) else '{:.2f}'.format(sim.ds)
print(sim.name_ds)
self.simulators.append(sim)
def process_all_local(self, save_path='data', save_mask=False, mask_ratio=1, offset_intensity=False, fin_angle=180,
allow_read=True, recon=True, **kwargs):
for sim in self.simulators:
sino_path = os.path.join(save_path, 'sino_loc_{:s}x'.format(sim.name_ds))
if len(glob.glob(os.path.join(sino_path, 'sino_loc*'))) == 0:
sim.sample_full_sinogram_local(save_path=sino_path, save_mask=save_mask, fin_angle=fin_angle)
else:
if allow_read:
sim.read_sinos_local(sino_path, fin_angle=fin_angle)
else:
sim.sample_full_sinogram_local(save_path=sino_path, save_mask=save_mask, fin_angle=fin_angle)
if recon:
recon_path = os.path.join(save_path, 'recon_loc_{:s}x'.format(sim.name_ds))
sim.recon_all_local(save_path=recon_path, mask_ratio=mask_ratio, offset_intensity=offset_intensity,
ref_fname=kwargs['ref_fname'])
sim.stitch_all_recons_local(save_path=save_path, fname='recon_local_{:s}x'.format(sim.name_ds))
def process_all_tomosaic(self, save_path='data', mask_ratio=1, fin_angle=180, recon=True):
for sim in self.simulators:
sim.sample_full_sinogram_tomosaic(fin_angle=fin_angle)
if recon:
sim.stitch_all_sinos_tomosaic()
sim.recon_full_tomosaic(save_path=save_path, fname='recon_tomosaic_{:s}x'.format(sim.name_ds),
mask_ratio=mask_ratio)
def estimate_dose(self, energy, sample, flux_rate, exposure):
for sim in self.simulators:
sim.dose_local = sim.estimate_dose(energy, sample, flux_rate, exposure, mode='local')
sim.dose_tomosaic = sim.estimate_dose(energy, sample, flux_rate, exposure, mode='tomosaic')
def calculate_snr(self, save_path='data'):
ref_local = dxchange.read_tiff(os.path.join(save_path, 'recon_local_1x.tiff'))
ref_tomosaic = dxchange.read_tiff(os.path.join(save_path, 'recon_tomosaic_1x.tiff'))
for sim in self.simulators:
if sim.ds not in (1, None):
recon_local = dxchange.read_tiff(os.path.join(save_path, 'recon_local_{:s}x.tiff'.format(sim.name_ds)))
recon_tomosaic = dxchange.read_tiff(os.path.join(save_path, 'recon_tomosaic_{:s}x.tiff'.format(sim.name_ds)))
sim.snr_local = snr(recon_local, ref_local)
sim.snr_tomosaic = snr(recon_tomosaic, ref_tomosaic)
def plot_snr_vs_dose(self):
dose_local = []
dose_tomosaic = []
snr_local = []
snr_tomosaic = []
for sim in self.simulators[1:]:
dose_local.append(sim.dose_local)
dose_tomosaic.append(sim.dose_tomosaic)
snr_local.append(sim.snr_local)
snr_tomosaic.append(sim.snr_tomosaic)
print('Local dose: ', dose_local, 'Local SNR: ', snr_local)
print('Tomosaic dose: ', dose_tomosaic, 'Tomosaic SNR: ', snr_tomosaic)
plt.figure()
plt.semilogx(dose_local, snr_local, label='Local')
plt.semilogx(dose_tomosaic, snr_tomosaic, label='Tomosaic')
plt.legend()
plt.xlabel('Dose (J/m$^2$)')
plt.ylabel('SNR')
plt.savefig('data/snr_vs_dose.pdf', format='pdf')
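# Illustrative sketch (not from the original module): a possible end-to-end
# driver for the Project class above. The sinogram filename, the instrument
# object, the sample description and the dose numbers are placeholders whose
# exact form depends on simulator.Simulator; they are assumptions here, not
# part of tomosim's documented API.
def _example_pipeline(sino_fname, instrument, sample):
    proj = Project()
    proj.add_simuators(sino_fname, instrument, type='tiff',
                       pixel_size=1, downsample=[2, 4])
    proj.process_all_local(save_path='data', ref_fname=sino_fname)
    proj.process_all_tomosaic(save_path='data')
    proj.estimate_dose(energy=25, sample=sample, flux_rate=1e5, exposure=1)
    proj.calculate_snr(save_path='data')
    proj.plot_snr_vs_dose()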
|
apache-2.0
|
bgyori/indra
|
indra/tests/test_belief_sklearn.py
|
3
|
12725
|
import random
import pickle
import numpy as np
from copy import copy
from collections import defaultdict
from os.path import join, abspath, dirname
from nose.tools import raises
from sklearn.linear_model import LogisticRegression
from indra.sources import signor
from indra.belief import BeliefEngine
from indra.tools import assemble_corpus as ac
from indra.statements import Evidence
from indra.belief.skl import CountsScorer
# A set of test statements derived from SIGNOR only
# (these include many different stmt types)
test_stmt_path = join(dirname(abspath(__file__)),
'belief_sklearn_test_stmts.pkl')
# An alternative set of test statements derived from the curated stmt dataset
# (these include supports/supported_by)
test_stmt_cur_path = join(dirname(abspath(__file__)),
'belief_sklearn_test_stmts_cur.pkl')
# A statement dataframe sample
test_df_path = join(dirname(abspath(__file__)),
'belief_sklearn_test_df.pkl')
with open(test_stmt_path, 'rb') as f:
test_stmts, y_arr_stmts = pickle.load(f)
with open(test_stmt_cur_path, 'rb') as f:
test_stmts_cur, y_arr_stmts_cur = pickle.load(f)
with open(test_df_path, 'rb') as f:
test_df, y_arr_df = pickle.load(f)
# A set of statements derived from Signor used for testing purposes.
def _dump_test_data(filename, num_per_type=10):
"""Get corpus of statements for testing that has a range of stmt types."""
sp = signor.process_from_web()
# Group statements by type
stmts_by_type = defaultdict(list)
for stmt in sp.statements:
stmts_by_type[stmt.__class__].append(stmt)
# Sample statements of each type (without replacement)
stmt_sample = []
for stmt_type, stmt_list in stmts_by_type.items():
if len(stmt_list) <= num_per_type:
stmt_sample.extend(stmt_list)
else:
stmt_sample.extend(random.sample(stmt_list, num_per_type))
# Make a random binary class vector for the stmt list
y_arr = [random.choice((0, 1)) for s in stmt_sample]
with open(test_stmt_path, 'wb') as f:
pickle.dump((stmt_sample, y_arr), f)
return stmt_sample
def test_counts_wrapper():
"""Instantiate counts wrapper and make stmt matrix"""
lr = LogisticRegression()
source_list = ['reach', 'sparser']
cw = CountsScorer(lr, source_list)
# Made this so it's not a ValueError, this may change back in the future
# depending on how we want to handle sources in statement data not seen
# in training.
# @raises(ValueError)
def test_missing_source():
"""Check that all source_apis in training data are in source list."""
lr = LogisticRegression()
source_list = ['reach', 'sparser']
cw = CountsScorer(lr, source_list)
# Should error because test stmts are from signor and signor
# is not in list
cw.stmts_to_matrix(test_stmts)
def test_stmts_to_matrix():
"""Check that all source_apis in training data are in source list."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
x_arr = cw.stmts_to_matrix(test_stmts)
assert isinstance(x_arr, np.ndarray), 'x_arr should be a numpy array'
assert x_arr.shape == (len(test_stmts), len(source_list)), \
'stmt matrix dimensions should match test stmts'
assert set(x_arr.sum(axis=0)) == set([0, 0, len(test_stmts)]), \
'Signor col should be 1 in every row, other cols 0.'
# Try again with statement type
cw = CountsScorer(lr, source_list, use_stmt_type=True)
num_types = len(cw.stmt_type_map)
x_arr = cw.stmts_to_matrix(test_stmts)
assert x_arr.shape == (len(test_stmts), len(source_list) + num_types), \
'matrix should have a col for sources and other cols for every ' \
'statement type.'
def test_fit_stmts():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
cw.fit(test_stmts, y_arr_stmts)
# Once the model is fit, the coef_ attribute should be defined
assert 'coef_' in cw.model.__dict__
def test_fit_stmts_predict_stmts():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
cw.fit(test_stmts, y_arr_stmts)
probs = cw.predict_proba(test_stmts)
assert probs.shape == (len(test_stmts), 2), \
'prediction results should have dimension (# stmts, # classes)'
log_probs = cw.predict_log_proba(test_stmts)
assert log_probs.shape == (len(test_stmts), 2), \
'prediction results should have dimension (# stmts, # classes)'
preds = cw.predict(test_stmts)
assert preds.shape == (len(test_stmts),), \
'prediction results should have dimension (# stmts)'
@raises(ValueError)
def test_check_df_cols_err():
"""Drop a required column and make sure we get a ValueError."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
cw.df_to_matrix(test_df.drop('stmt_type', axis=1))
def test_check_df_cols_noerr():
"""Test dataframe should not raise ValueError."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
cw.df_to_matrix(test_df)
def test_df_to_matrix():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
x_arr = cw.df_to_matrix(test_df)
assert isinstance(x_arr, np.ndarray), 'x_arr should be a numpy array'
assert x_arr.shape == (len(test_df), len(source_list)), \
'stmt matrix dimensions should match test stmts'
assert x_arr.shape == (len(test_df), len(source_list))
# Try again with statement type
cw = CountsScorer(lr, source_list, use_stmt_type=True)
num_types = len(cw.stmt_type_map)
x_arr = cw.df_to_matrix(test_df)
assert x_arr.shape == (len(test_df), len(source_list) + num_types), \
'matrix should have a col for sources and other cols for every ' \
'statement type.'
def test_fit_df():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'medscan', 'trips', 'rlimsp']
cw = CountsScorer(lr, source_list)
cw.fit(test_df, y_arr_df)
# Once the model is fit, the coef_ attribute should be defined
assert 'coef_' in cw.model.__dict__
def test_fit_stmts_pred_df():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
# Train on statement data
cw.fit(test_stmts, y_arr_stmts)
# Predict on DF data
probs = cw.predict_proba(test_df)
assert probs.shape == (len(test_df), 2), \
'prediction results should have dimension (# stmts, # classes)'
log_probs = cw.predict_log_proba(test_df)
assert log_probs.shape == (len(test_df), 2), \
'prediction results should have dimension (# stmts, # classes)'
preds = cw.predict(test_df)
assert preds.shape == (len(test_df),), \
'prediction results should have dimension (# stmts)'
def test_fit_df_pred_stmts():
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
# Train on statement data
cw.fit(test_df, y_arr_df)
# Predict on DF data
probs = cw.predict_proba(test_stmts)
assert probs.shape == (len(test_stmts), 2), \
'prediction results should have dimension (# stmts, # classes)'
log_probs = cw.predict_log_proba(test_stmts)
assert log_probs.shape == (len(test_stmts), 2), \
'prediction results should have dimension (# stmts, # classes)'
preds = cw.predict(test_stmts)
assert preds.shape == (len(test_stmts),), \
'prediction results should have dimension (# stmts)'
@raises(ValueError)
def test_check_missing_source_counts():
lr = LogisticRegression()
source_list = ['reach', 'sparser']
cw = CountsScorer(lr, source_list)
# Drop the source_counts column
df_no_sc = test_df.drop('source_counts', axis=1)
# Should error
cw.fit(df_no_sc, y_arr_df)
def test_check_source_columns():
lr = LogisticRegression()
source_list = ['reach', 'sparser']
cw = CountsScorer(lr, source_list)
# Drop the source_counts column
df_sc = test_df.drop('source_counts', axis=1)
# Add reach and sparser columns
df_sc['reach'] = 0
df_sc['sparser'] = 0
# Should not error
cw.fit(df_sc, y_arr_df)
def test_matrix_to_matrix():
"""Check that we get a matrix back when passed to to_matrix."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list)
# Train on statement data
stmt_arr = cw.to_matrix(test_df)
assert cw.to_matrix(stmt_arr) is stmt_arr, \
'If passed a numpy array to_matrix should return it back.'
@raises(ValueError)
def test_use_members_with_df():
"""Check that we can't set use_num_members when passing a DataFrame."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list, use_num_members=True)
# This should error because stmt DataFrame doesn't contain num_members
# info
stmt_arr = cw.to_matrix(test_df)
def test_use_members_with_stmts():
"""Check that we can set use_num_members when passing statements."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cw = CountsScorer(lr, source_list, use_num_members=True)
x_arr = cw.to_matrix(test_stmts)
assert x_arr.shape == (len(test_stmts), len(source_list)+1), \
'stmt matrix dimensions should match test stmts plus num_members'
def setup_belief():
# Make a model
lr = LogisticRegression()
# Get all the sources
source_list = CountsScorer.get_all_sources(test_stmts_cur)
cs = CountsScorer(lr, source_list)
# Train on curated stmt data
cs.fit(test_stmts_cur, y_arr_stmts_cur)
# Run predictions on test statements
probs = cs.predict_proba(test_stmts_cur)[:, 1]
# Now check if we get these same beliefs set on the statements when we
# run with the belief engine:
# Get scorer and belief engine instances for trained model
be = BeliefEngine(scorer=cs)
# Make a shallow copy of the test stmts so that we don't change beliefs
# of the global instances as a side-effect of this test
test_stmts_copy = copy(test_stmts_cur)
return be, test_stmts_copy, probs
def test_set_prior_probs():
# Get probs for a set of statements, and a belief engine instance
be, test_stmts_copy, probs = setup_belief()
# Set beliefs
be.set_prior_probs(test_stmts_copy)
beliefs = [s.belief for s in test_stmts_copy]
# Check that they match
assert np.allclose(beliefs, probs), \
"Statement beliefs should be set to predicted probabilities."
@raises(NotImplementedError)
def test_df_extra_ev_value_error():
"""to_matrix should raise NotImplementError if given a DataFrame and extra
evidence (for now)."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cs = CountsScorer(lr, source_list)
cs.to_matrix(test_df, extra_evidence=[[5]])
@raises(ValueError)
def test_extra_evidence_length():
"""Should raise ValueError because the extra_evidence list is not the
same length as the list of statements."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cs = CountsScorer(lr, source_list)
extra_ev = [[5]]
x_arr = cs.stmts_to_matrix(test_stmts, extra_evidence=extra_ev)
@raises(ValueError)
def test_extra_evidence_content():
"""Should raise ValueError if extra_evidence list entries are not
Evidence objects or empty lists."""
lr = LogisticRegression()
source_list = ['reach', 'sparser', 'signor']
cs = CountsScorer(lr, source_list)
extra_ev = ([[5]] * (len(test_stmts) - 1)) + [[]]
x_arr = cs.stmts_to_matrix(test_stmts, extra_evidence=extra_ev)
def test_set_hierarchy_probs():
# Get probs for a set of statements, and a belief engine instance
be, test_stmts_copy, prior_probs = setup_belief()
# Set beliefs on the flattened statements
top_level = ac.filter_top_level(test_stmts_copy)
be.set_hierarchy_probs(test_stmts_copy)
# Compare hierarchy probs to prior probs
for stmt, prior_prob in zip(test_stmts_copy, prior_probs):
# Check that the top-level statements beliefs have not changed
if stmt in top_level:
assert stmt.belief == prior_prob
# We expect the belief to change if including more evidence
else:
assert stmt.belief != prior_prob
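# Illustrative sketch (not part of the original test module): the minimal
# train-then-score flow that the tests above exercise, gathered into one
# helper; stmts/y_arr stand for whatever curated corpus the caller provides.
def _train_and_score(stmts, y_arr):
    lr = LogisticRegression()
    cs = CountsScorer(lr, CountsScorer.get_all_sources(stmts))
    cs.fit(stmts, y_arr)
    be = BeliefEngine(scorer=cs)
    be.set_prior_probs(stmts)  # writes predicted probabilities onto stmt.belief
    return [s.belief for s in stmts]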
|
bsd-2-clause
|
rmcgibbo/scipy
|
scipy/interpolate/ndgriddata.py
|
45
|
7161
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
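# Illustrative sketch (not part of the original module): minimal usage of
# NearestNDInterpolator on a tiny 2-D data set; the numbers are made up.
def _nearest_nd_interpolator_example():
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    vals = np.array([10.0, 11.0, 12.0, 13.0])
    interp = NearestNDInterpolator(pts, vals)
    # Each query point receives the value of its nearest data point.
    return interp(np.array([[0.1, 0.2], [0.9, 0.9]]))  # -> array([10., 13.])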
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
          tessellate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
|
bsd-3-clause
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/cbook.py
|
6
|
83371
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
from itertools import repeat
import collections
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute'):
"""
Used to display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
        function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type='function'):
"""
Decorator to mark a function as deprecated.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
        function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(func, message=message, name=name, alternative=alternative,
pending=pending):
import functools
import textwrap
if isinstance(func, classmethod):
func = func.__func__
is_classmethod = True
else:
is_classmethod = False
if not name:
name = func.__name__
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = deprecated_func.__doc__
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
deprecated_func.__doc__ = new_doc
if is_classmethod:
deprecated_func = classmethod(deprecated_func)
return deprecated_func
return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
class tostr(converter):
"""convert to string or None"""
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
"""convert to a datetime or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
"""convert to a date or None"""
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
"""use a :func:`time.strptime` format string for conversion"""
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
"""convert to a float or None"""
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
class toint(converter):
"""convert to an int or None"""
def __init__(self, missing='Null', missingval=None):
        converter.__init__(self, missing)
        self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
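# Illustrative sketch (not part of the original module): how the converter
# classes above behave on present and missing values.
def _converter_example():
    to_f = tofloat(missingval=float('nan'))
    to_d = todate(fmt='%Y-%m-%d')
    # The missing marker maps to the configured missingval; other strings convert.
    return to_f('Null'), to_f('3.14'), to_d('2017-01-31')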
class _BoundMethodProxy(object):
"""
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
"""
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
try:
if six.PY3:
self.inst = ref(cb.__self__, self._destroy)
else:
self.inst = ref(cb.im_self, self._destroy)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
"""
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
"""
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
"""
Compare the held function and instance with that held by
another proxy.
"""
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
"""
Inverse of __eq__.
"""
return not self.__eq__(other)
def __hash__(self):
return self._hash
class CallbackRegistry(object):
"""
Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/python-weak-references/>`_.
"""
def __init__(self):
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, dict())
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(six.iteritems(self._func_cid_map)):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
try:
proxy(*args, **kwargs)
except ReferenceError:
self._remove_proxy(proxy)
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
# store a dictionary of this SilentList's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
----------
local_var: any object
The local variable (highest priority)
kwargs: dict
Dictionary of keyword arguments; modified in place
keys: str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
-------
out: any object
Either local_var or one of kwargs[key] for key in keys
Raises
------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
warnings.warn('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
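# Illustrative sketch (not part of the original module): the precedence rule of
# local_over_kwdict in action; 'fc'/'facecolor' are just example key names.
def _local_over_kwdict_example():
    # An explicit local value wins; the conflicting kwarg is popped and an
    # IgnoredKeywordWarning is emitted.
    first = local_over_kwdict('blue', {'fc': 'red'}, 'fc')  # -> 'blue'
    # With no local value, the first non-None kwarg found is returned.
    second = local_over_kwdict(None, {'fc': 'red'}, 'fc', 'facecolor')  # -> 'red'
    return first, second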
def strip_math(s):
"""remove latex formatting from mathtext"""
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
class Bunch(object):
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: https://code.activestate.com/recipes/121294/
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
keys = six.iterkeys(self.__dict__)
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k])
for k
in keys])
def unique(x):
"""Return a list of unique elements of *x*"""
return list(six.iterkeys(dict([(val, 1) for val in x])))
def iterable(obj):
"""return true if *obj* is iterable"""
try:
iter(obj)
except TypeError:
return False
return True
def is_string_like(obj):
"""Return True if *obj* looks like a string"""
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
"""Returns true if *obj* is iterable and contains strings"""
if not iterable(obj):
return False
if is_string_like(obj) and not isinstance(obj, np.ndarray):
try:
obj = obj.values
except AttributeError:
# not pandas
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_hashable(obj):
"""Returns true if *obj* can be hashed"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
"""return true if *obj* looks like a file object with a *write* method"""
return hasattr(obj, 'write') and six.callable(obj.write)
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
def is_scalar(obj):
"""return true if *obj* is not string like and is not iterable"""
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
"""return true if *obj* looks like a number"""
try:
obj + 1
except:
return False
else:
return True
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return is_string_like(val) or not iterable(val)
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
Set the rc parameter examples.directory to the directory where we should
look, if sample_data files are stored in a location different than
    default (which is `mpl-data/sample_data` at the same level of the
    `matplotlib` Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(matplotlib._get_data_path(), 'sample_data')
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: https://code.activestate.com/recipes/121294/
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item):
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter(object):
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(adict, text))
xlat = Xlator(adict)
print(xlat.xlat(text))
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, list(six.iterkeys(self)))))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
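# Illustrative check (added comment, not in the original source): names that
# sound alike map to the same code under this implementation, e.g.
#
#     >>> soundex('Robert'), soundex('Rupert')
#     ('R163', 'R163')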
class Null(object):
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
def __bool__(self):
            return False
else:
def __nonzero__(self):
return 0
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
if six.PY3:
os.makedirs(newdir, mode=mode, exist_ok=True)
else:
try:
os.makedirs(newdir, mode=mode)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
"""delete all of the *keys* from the :class:`dict` *d*"""
for key in keys:
try:
del d[key]
except KeyError:
pass
class RingBuffer(object):
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
            self.__class__ = self.__Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
    def __getitem__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
      len(' '.join(seq[:ind])) <= N
"""
s_len = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
s_len += len(word) + 1 # +1 to account for the len(' ')
if s_len >= N:
return ind
return len(seq)
def wrap(prefix, text, cols):
"""wrap *text* with *prefix* at length *cols*"""
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
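# Hedged example of the behaviour described above (added for illustration):
# leading blank lines are dropped and the indentation of the first non-blank
# line sets how much is stripped from the rest.
#
#     >>> dedent('\n    hello\n      world\n')
#     'hello\n  world'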
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"""Break up the *seq* into *num* tuples"""
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*.  If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain the size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
"""return the current element, or None"""
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
"""move the position forward and return the current element"""
n = len(self._elements)
if self._pos < n - 1:
self._pos += 1
return self()
def back(self):
"""move the position back and return the current element"""
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
"""push the first element onto the top of the stack"""
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
"""empty the stack"""
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
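# Hedged usage sketch (added for illustration): the stack behaves like a
# browser history -- pushing after going back discards the forward entries.
#
#     >>> s = Stack()
#     >>> for view in ('home', 'zoom1', 'zoom2'):
#     ...     _ = s.push(view)
#     >>> s.back()
#     'zoom1'
#     >>> s.push('pan')    # 'zoom2' is discarded
#     'pan'
#     >>> s.forward()
#     'pan'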
def popall(seq):
'empty a list'
for i in xrange(len(seq)):
seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o)
if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
def reverse_dict(d):
"""reverse the dictionary -- may lose data if values are not unique!"""
return dict([(v, k) for k, v in six.iteritems(d)])
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return dict([(k, v) for (k, v) in six.iteritems(d) if k in keys])
def report_memory(i=0): # argument may go away
"""return the memory consumed by process"""
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen(str('ps -p %d -o osz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen(str('ps -p %d -o rss,sz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen(str('ps -p %d -o rss,vsz') % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen([str("tasklist"), "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
"""make sure *args* are equal len before zipping"""
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
def issubclass_safe(x, klass):
"""return issubclass(x, klass) and return False on a TypeError"""
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# Note that the argument to `byteswap` is 'inplace',
# thus if we have already made a copy, do the byteswap in
# place, else make a copy with the byte order swapped.
# Be explicit that we are swapping the byte order of the dtype
x = x.byteswap(copy).newbyteorder('S')
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
class MemoryMonitor(object):
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n / segments)
ii = list(xrange(0, n, dn))
ii[-1] = n - 1
print()
print('memory report: i, mem, dmem, dmem/nloops')
print(0, self._mem[0])
for i in range(1, len(ii)):
di = ii[i] - ii[i - 1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di)))
if self._overflow:
print("Warning: array size was too small for the number of calls.")
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from .pylab import figure
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
using :meth:`joined`, and all disjoint sets can be retreived by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def remove(self, a):
self.clean()
mapping = self._mapping
seta = mapping.pop(ref(a), None)
if seta is not None:
seta.remove(ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token:
pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
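# Hedged example (added for illustration): a nan in one argument and a masked
# entry in the other both remove the corresponding rows from every output.
#
#     >>> x = np.array([1., 2., np.nan, 4.])
#     >>> y = np.ma.array([5., 6., 7., 8.], mask=[False, False, False, True])
#     >>> delete_masked_points(x, y)   # -> [array([1., 2.]), array([5., 6.])]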
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers past the first
        and third quartiles (e.g., Q3 + whis*IQR, where IQR is the
        interquartile range, Q3 - Q1).  Beyond the whiskers, data are
        considered outliers and are plotted as individual points.  This
        can also be set to an ascending sequence of percentiles (e.g.,
        [5, 95]) to place the whiskers at specific percentiles of the
        data.  Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the
data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
======== ===================================
Key Value Description
======== ===================================
label tick label for the boxplot
        mean       arithmetic mean value
med 50th percentile
q1 first quartile (25th percentile)
q3 third quartile (75th percentile)
cilo lower notch around the median
cihi upper notch around the median
whislo end of the lower whisker
whishi end of the upper whisker
fliers outliers
======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
\mathrm{med} \pm 1.57 \\times \\frac{\mathrm{iqr}}{\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
ii = np.random.randint(M, size=(N, M))
        bsData = data[ii]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X)
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
stats['med'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
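# Hedged usage sketch (added for illustration): the returned dictionaries can
# be fed to the low-level ``Axes.bxp`` drawing method mentioned in the
# docstring above.
#
#     >>> data = np.random.lognormal(size=(40, 3))
#     >>> stats = boxplot_stats(data, labels=['a', 'b', 'c'])
#     >>> sorted(stats[0])
#     ['cihi', 'cilo', 'fliers', 'iqr', 'label', 'mean', 'med', 'q1', 'q3',
#      'whishi', 'whislo']
#     >>> # fig, ax = plt.subplots(); ax.bxp(stats)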
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
# The ls_mapper maps short codes for line style to their full name used
# by backends
# The reverse mapper is for mapping full names to short ones
ls_mapper_r = dict([(ls[1], ls[0]) for ls in _linestyles])
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while True:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
def _check_1d(x):
'''
    Converts a sequence of less than 1 dimension to an array of 1
dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
def violin_stats(X, method, points=100):
"""
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
of dictionaries to the `axes.vplot` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
"""
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X)
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
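# Hedged usage sketch (added for illustration).  Any callable with the
# ``method(v, coords)`` signature works; here scipy's gaussian_kde is used,
# which is an extra dependency not imported by this module.
#
#     >>> from scipy.stats import gaussian_kde
#     >>> data = np.random.normal(size=(100, 2))
#     >>> vpstats = violin_stats(data, lambda v, c: gaussian_kde(v).evaluate(c))
#     >>> sorted(vpstats[0])
#     ['coords', 'max', 'mean', 'median', 'min', 'vals']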
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
def _step_validation(x, *args):
"""
Helper function of `pts_to_*step` functions
    This function does all of the normalization required on the
    inputs and generates the template for the output
"""
args = tuple(np.asanyarray(y) for y in args)
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("x must be 1 dimensional")
if len(args) == 0:
raise ValueError("At least one Y value must be passed")
return np.vstack((x, ) + args)
def pts_to_prestep(x, *args):
"""
    Convert a continuous line to pre-steps.
    Given a set of N points, convert to 2N - 1 points, which when
    connected linearly give a step function that changes values at the
    beginning of each interval.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
    >>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = np.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, 0::2], steps[0, 1::2] = vertices[0, :], vertices[0, :-1]
steps[1:, 0::2], steps[1:, 1:-1:2] = vertices[1:, :], vertices[1:, 1:]
# convert 2D array back to tuple
return tuple(steps)
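# Worked example (added for illustration) of the pre-step conversion above;
# note the output length is 2N - 1:
#
#     >>> pts_to_prestep([1, 2, 3], [4, 5, 6])
#     (array([1., 1., 2., 2., 3.]), array([4., 5., 5., 6., 6.]))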
def pts_to_poststep(x, *args):
"""
    Convert a continuous line to post-steps.
    Given a set of N points, convert to 2N - 1 points, which when
    connected linearly give a step function that changes values at the
    end of each interval.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N - 1``
Examples
--------
    >>> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x) - 1), np.float)
# do the to step conversion logic
steps[0, ::2], steps[0, 1:-1:2] = vertices[0, :], vertices[0, 1:]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :-1]
# convert 2D array back to tuple
return tuple(steps)
def pts_to_midstep(x, *args):
"""
    Convert a continuous line to mid-steps.
    Given a set of N points, convert to 2N points, which when connected
    linearly give a step function that changes values at the middle of
    each interval.
Parameters
----------
x : array
The x location of the steps
y1, y2, ... : array
Any number of y arrays to be turned into steps.
All must be the same length as ``x``
Returns
-------
x, y1, y2, .. : array
The x and y values converted to steps in the same order
as the input. If the input is length ``N``, each of these arrays
        will be length ``2N``
Examples
--------
    >>> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
# do normalization
vertices = _step_validation(x, *args)
# create the output array
steps = ma.zeros((vertices.shape[0], 2 * len(x)), np.float)
steps[0, 1:-1:2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 2::2] = 0.5 * (vertices[0, :-1] + vertices[0, 1:])
steps[0, 0] = vertices[0, 0]
steps[0, -1] = vertices[0, -1]
steps[1:, 0::2], steps[1:, 1::2] = vertices[1:, :], vertices[1:, :]
# convert 2D array back to tuple
return tuple(steps)
STEP_LOOKUP_MAP = {'pre': pts_to_prestep,
'post': pts_to_poststep,
'mid': pts_to_midstep,
'step-pre': pts_to_prestep,
'step-post': pts_to_poststep,
'step-mid': pts_to_midstep}
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
Tries to get `y.index` (works if this is a pd.Series), if that
fails, return np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = np.atleast_1d(y)
return np.arange(y.shape[0], dtype=float), y
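# Hedged example (added for illustration): with a plain sequence the fallback
# branch generates the x values.
#
#     >>> index_of([10, 20, 30])
#     (array([0., 1., 2.]), array([10, 20, 30]))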
def safe_first_element(obj):
if isinstance(obj, collections.Iterator):
# needed to accept `array.flat` as input.
# np.flatiter reports as an instance of collections.Iterator
# but can still be indexed via [].
# This has the side effect of re-setting the iterator, but
# that is acceptable.
try:
return obj[0]
except TypeError:
pass
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
The order they are resolved are:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
    alias_mapping : dict, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
A tuple of allowed fields. If this not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in six.iteritems(alias_mapping):
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warnings.warn("Saw kwargs {seen!r} which are all aliases for "
"{canon!r}. Kept value from {used!r}".format(
seen=seen, canon=canonical, used=seen[-1]))
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = set(required) | set(allowed)
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError("kwargs contains {keys!r} which are not in "
"the required {req!r} or "
"allowed {allow!r} keys".format(
keys=fail_keys, req=required,
allow=allowed))
return ret
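# Hedged example (added for illustration): aliases are folded onto their
# canonical names; the alias table below is hypothetical.
#
#     >>> aliases = {'color': ['c'], 'linewidth': ['lw']}
#     >>> normalize_kwargs({'c': 'red', 'lw': 2}, aliases)
#     {'color': 'red', 'linewidth': 2}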
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed.  This may be due to another process holding this
lock.  If you are sure no other matplotlib process is running, try
removing these folders and trying again.
"""
class Locked(object):
"""
Context manager to handle locks.
Based on code from conda.
(c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
All Rights Reserved
conda is distributed under the terms of the BSD 3-clause license.
Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
"""
LOCKFN = '.matplotlib_lock'
class TimeoutError(RuntimeError):
pass
def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
self.remove = True
def __enter__(self):
retries = 50
sleeptime = 0.1
while retries:
files = glob.glob(self.pattern)
if files and not files[0].endswith(self.end):
time.sleep(sleeptime)
retries -= 1
else:
break
else:
err_str = _lockstr.format(self.pattern)
raise self.TimeoutError(err_str)
if not files:
try:
os.makedirs(self.lock_path)
except OSError:
pass
else: # PID lock already here --- someone else will remove it.
self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
for path in self.lock_path, self.path:
try:
os.rmdir(path)
except OSError:
pass
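# Hedged usage sketch (added for illustration; ``cache_dir`` is hypothetical):
# the context manager serialises access to a directory shared between
# matplotlib processes and removes its lock folder on exit.
#
#     with Locked(cache_dir):
#         ...  # create or update files other processes might also touch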
|
gpl-3.0
|
mne-tools/mne-tools.github.io
|
dev/_downloads/73dfba43f3cc4a95e919bb2c5d31301a/40_visualize_raw.py
|
5
|
8454
|
# -*- coding: utf-8 -*-
"""
.. _tut-visualize-raw:
Built-in plotting methods for Raw objects
=========================================
This tutorial shows how to plot continuous data as a time series, how to plot
the spectral density of continuous data, and how to plot the sensor locations
and projectors stored in `~mne.io.Raw` objects.
As usual we'll start by importing the modules we need, loading some
:ref:`example data <sample-dataset>`, and cropping the `~mne.io.Raw`
object to just 60 seconds before loading it into RAM to save memory:
"""
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# We've seen in :ref:`a previous tutorial <tut-raw-class>` how to plot data
# from a `~mne.io.Raw` object using :doc:`matplotlib <matplotlib:index>`,
# but `~mne.io.Raw` objects also have several built-in plotting methods:
#
# - `~mne.io.Raw.plot`
# - `~mne.io.Raw.plot_psd`
# - `~mne.io.Raw.plot_psd_topo`
# - `~mne.io.Raw.plot_sensors`
# - `~mne.io.Raw.plot_projs_topomap`
#
# The first three are discussed here in detail; the last two are shown briefly
# and covered in-depth in other tutorials.
#
#
# Interactive data browsing with ``Raw.plot()``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The `~mne.io.Raw.plot` method of `~mne.io.Raw` objects provides
# a versatile interface for exploring continuous data. For interactive viewing
# and data quality checking, it can be called with no additional parameters:
raw.plot()
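# (Illustrative call, not part of the original tutorial: the same browser can
# be opened with an explicit window length and channel count via the
# ``duration`` and ``n_channels`` parameters discussed below.)
raw.plot(duration=5, n_channels=30)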
###############################################################################
# It may not be obvious when viewing this tutorial online, but by default, the
# `~mne.io.Raw.plot` method generates an *interactive* plot window with
# several useful features:
#
# - It spaces the channels equally along the y-axis.
#
# - 20 channels are shown by default; you can scroll through the channels
# using the :kbd:`↑` and :kbd:`↓` arrow keys, or by clicking on the
# colored scroll bar on the right edge of the plot.
#
# - The number of visible channels can be adjusted by the ``n_channels``
# parameter, or changed interactively using :kbd:`page up` and :kbd:`page
# down` keys.
#
# - You can toggle the display to "butterfly" mode (superimposing all
# channels of the same type on top of one another) by pressing :kbd:`b`,
# or start in butterfly mode by passing the ``butterfly=True`` parameter.
#
# - It shows the first 10 seconds of the `~mne.io.Raw` object.
#
# - You can shorten or lengthen the window length using :kbd:`home` and
# :kbd:`end` keys, or start with a specific window duration by passing the
# ``duration`` parameter.
#
# - You can scroll in the time domain using the :kbd:`←` and
# :kbd:`→` arrow keys, or start at a specific point by passing the
# ``start`` parameter. Scrolling using :kbd:`shift`:kbd:`→` or
# :kbd:`shift`:kbd:`←` scrolls a full window width at a time.
#
# - It allows clicking on channels to mark/unmark as "bad".
#
# - When the plot window is closed, the `~mne.io.Raw` object's
# ``info`` attribute will be updated, adding or removing the newly
# (un)marked channels to/from the `~mne.Info` object's ``bads``
# field (A.K.A. ``raw.info['bads']``).
#
# - It allows interactive :term:`annotation <annotations>` of the raw data.
#
# - This allows you to mark time spans that should be excluded from future
# computations due to large movement artifacts, line noise, or other
# distortions of the signal. Annotation mode is entered by pressing
# :kbd:`a`. See :ref:`annotations-tutorial` for details.
#
# - It automatically applies any :term:`projectors <projector>` before plotting
# the data.
#
# - These can be enabled/disabled interactively by clicking the ``Proj``
# button at the lower right corner of the plot window, or disabled by
# default by passing the ``proj=False`` parameter. See
# :ref:`tut-projectors-background` for more info on projectors.
#
# These and other keyboard shortcuts are listed in the Help window, accessed
# through the ``Help`` button at the lower left corner of the plot window.
# Other plot properties (such as color of the channel traces, channel order and
# grouping, simultaneous plotting of :term:`events`, scaling, clipping,
# filtering, etc.) can also be adjusted through parameters passed to the
# `~mne.io.Raw.plot` method; see the docstring for details.
#
#
# Plotting spectral density of continuous data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To visualize the frequency content of continuous data, the `~mne.io.Raw`
# object provides a `~mne.io.Raw.plot_psd` method to plot the
# `spectral density`_ of the data.
raw.plot_psd(average=True)
###############################################################################
# If the data have been filtered, vertical dashed lines will automatically
# indicate filter boundaries. The spectrum for each channel type is drawn in
# its own subplot; here we've passed the ``average=True`` parameter to get a
# summary for each channel type, but it is also possible to plot each channel
# individually, with options for how the spectrum should be computed,
# color-coding the channels by location, and more. For example, here is a plot
# of just a few sensors (specified with the ``picks`` parameter), color-coded
# by spatial location (via the ``spatial_colors`` parameter, see the
# documentation of `~mne.io.Raw.plot_psd` for full details):
midline = ['EEG 002', 'EEG 012', 'EEG 030', 'EEG 048', 'EEG 058', 'EEG 060']
raw.plot_psd(picks=midline)
###############################################################################
# Alternatively, you can plot the PSD for every sensor on its own axes, with
# the axes arranged spatially to correspond to sensor locations in space, using
# `~mne.io.Raw.plot_psd_topo`:
raw.plot_psd_topo()
###############################################################################
# This plot is also interactive; hovering over each "thumbnail" plot will
# display the channel name in the bottom left of the plot window, and clicking
# on a thumbnail plot will create a second figure showing a larger version of
# the selected channel's spectral density (as if you had called
# `~mne.io.Raw.plot_psd` on that channel).
#
# By default, `~mne.io.Raw.plot_psd_topo` will show only the MEG
# channels if MEG channels are present; if only EEG channels are found, they
# will be plotted instead:
raw.copy().pick_types(meg=False, eeg=True).plot_psd_topo()
###############################################################################
# Plotting sensor locations from ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The channel locations in a `~mne.io.Raw` object can be easily plotted
# with the `~mne.io.Raw.plot_sensors` method. A brief example is shown
# here; notice that channels in ``raw.info['bads']`` are plotted in red. More
# details and additional examples are given in the tutorial
# :ref:`tut-sensor-locations`.
raw.plot_sensors(ch_type='eeg')
###############################################################################
# .. _`tut-section-raw-plot-proj`:
#
# Plotting projectors from ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As seen in the output of `mne.io.read_raw_fif` above, there are
# :term:`projectors <projector>` included in the example `~mne.io.Raw`
# file (representing environmental noise in the signal, so it can later be
# "projected out" during preprocessing). You can visualize these projectors
# using the `~mne.io.Raw.plot_projs_topomap` method. By default it will
# show one figure per channel type for which projectors are present, and each
# figure will have one subplot per projector. The three projectors in this file
# were only computed for magnetometers, so one figure with three subplots is
# generated. More details on working with and plotting projectors are given in
# :ref:`tut-projectors-background` and :ref:`tut-artifact-ssp`.
raw.plot_projs_topomap(colorbar=True)
###############################################################################
# .. LINKS
#
# .. _spectral density: https://en.wikipedia.org/wiki/Spectral_density
|
bsd-3-clause
|
perrygeo/geopandas
|
tests/test_tools.py
|
8
|
1589
|
from __future__ import absolute_import
from shapely.geometry import Point, MultiPoint, LineString
from geopandas import GeoSeries
from geopandas.tools import collect
from .util import unittest
class TestTools(unittest.TestCase):
def setUp(self):
self.p1 = Point(0,0)
self.p2 = Point(1,1)
self.p3 = Point(2,2)
self.mpc = MultiPoint([self.p1, self.p2, self.p3])
self.mp1 = MultiPoint([self.p1, self.p2])
self.line1 = LineString([(3,3), (4,4)])
def test_collect_single(self):
result = collect(self.p1)
self.assert_(self.p1.equals(result))
def test_collect_single_force_multi(self):
result = collect(self.p1, multi=True)
expected = MultiPoint([self.p1])
self.assert_(expected.equals(result))
def test_collect_multi(self):
result = collect(self.mp1)
self.assert_(self.mp1.equals(result))
def test_collect_multi_force_multi(self):
        result = collect(self.mp1, multi=True)
self.assert_(self.mp1.equals(result))
def test_collect_list(self):
result = collect([self.p1, self.p2, self.p3])
self.assert_(self.mpc.equals(result))
def test_collect_GeoSeries(self):
s = GeoSeries([self.p1, self.p2, self.p3])
result = collect(s)
self.assert_(self.mpc.equals(result))
def test_collect_mixed_types(self):
with self.assertRaises(ValueError):
collect([self.p1, self.line1])
def test_collect_mixed_multi(self):
with self.assertRaises(ValueError):
collect([self.mpc, self.mp1])
|
bsd-3-clause
|
pymir3/pymir3
|
scripts/acf/tr_svm_reg.py
|
1
|
3210
|
from sklearn.grid_search import GridSearchCV
from model_training import ModelTrainer, ModelTrainerInput
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import f_classif as anova
from sklearn.cross_validation import ShuffleSplit
from sklearn.preprocessing import StandardScaler
import time
import dill
import numpy as np
class SvmRegModelTrainer(ModelTrainer):
def __init__(self):
pass
def train(self, train_data):
"""
:param train_data:
:type train_data: ModelTrainerInput
"""
probability = self.params['svm_reg']['probability']
if self.params['svm_reg']['balanced_class_weights']:
svmc = SVC(kernel='rbf', probability=probability, class_weight='balanced')
else:
svmc = SVC(kernel='rbf', probability=probability)
Cs = self.params['svm_reg']['Cs']
gammas = self.params['svm_reg']['gammas']
out_filename = self.params['general']['scratch_directory'] + "/" + self.params['model_training']['output_model']
print "training model with SVM and grid search (%d combinations)..." % (len(Cs) * len(gammas))
print "using ANOVA feature selection"
print "training set size: %d, # of features: %d" % (len(train_data.labels), train_data.features.shape[1])
print "gammas: ", gammas
print "C:", Cs
T0 = time.time()
features = train_data.features
labels = train_data.labels
transform = SelectPercentile(anova)
scaler = StandardScaler()
clf = Pipeline([ ('standardizer', scaler ), ('anova', transform), ('svm', svmc)])
percentiles = (np.arange(11) * 10)[1:]
cv = ShuffleSplit(len(train_data.labels), n_iter=10, test_size=0.2, random_state=0)
estimator = GridSearchCV(clf,
dict(anova__percentile=percentiles,
svm__gamma=gammas,
svm__C=Cs),
cv=cv,
n_jobs=self.params['svm_reg']['num_workers'])
scaler.fit(features)
features = scaler.transform(features)
estimator.fit(features, labels)
T1 = time.time()
print "model training took %f seconds" % (T1-T0)
print "best model score: %f" % (estimator.best_score_)
best_percentile = estimator.best_estimator_.named_steps['anova'].percentile
best_c = estimator.best_estimator_.named_steps['svm'].C
best_gamma = estimator.best_estimator_.named_steps['svm'].gamma
print "best params found for SVM: C = %.2ef, gamma = %.2ef" % (best_c, best_gamma)
print "best params found for ANOVA: percetile = %d" % (best_percentile)
print "saved best model to %s" % (out_filename)
outfile = open(out_filename, "w")
dill.dump(estimator.best_estimator_, outfile, dill.HIGHEST_PROTOCOL )
outfile_scaler = open('%s.scaler' % out_filename, "w")
dill.dump(scaler, outfile_scaler, dill.HIGHEST_PROTOCOL)
#dill.dump( StandardScaler().fit(features) )
|
mit
|
Bismarrck/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimators_test.py
|
46
|
6682
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
eklitzke/schemaless
|
examples/mysqlbench/plot.py
|
1
|
1297
|
import csv
import sys
import optparse
import matplotlib
from matplotlib import pyplot
pyplot.rcParams.update({
'backend': 'cairo',
'axes.labelsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'font.sans-serif': ['Droid Sans']})
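# Expected CSV layout (a sketch inferred from main() below, not from the
# benchmark itself): a header row whose first column is 'cumulative', followed
# by one column per benchmarked variant, e.g.
#
#   cumulative,simple_insert,indexed_insert
#   10000,1.02,1.31
#   20000,1.05,1.42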
def main(csv_name, opts):
reader = iter(csv.reader(open(csv_name)))
names = reader.next()
data = dict((n, []) for n in names)
for row in reader:
for name, val in zip(names, row):
data[name].append(float(val))
for name in names[1:]:
xs, ys = [], []
for x in xrange(len(data[name])):
xs.append(data['cumulative'][x])
ys.append(data[name][x])
pyplot.plot(xs, ys, label=name)
#pyplot.scatter(xs, ys, label=name)
pyplot.xlabel('cumulative # of records inserted')
pyplot.ylabel('seconds per 10k inserts')
pyplot.legend(loc=2)
if opts.title:
pyplot.title(opts.title)
pyplot.savefig(opts.output, format='png', dpi=120)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-t', '--title', default=None, help='the title to use')
parser.add_option('-o', '--output', default='graph.png', help='what file to output to')
opts, args = parser.parse_args()
if len(args) != 1:
parser.error('must specify an input file')
main(args[0], opts)
|
isc
|
PmagPy/PmagPy
|
programs/chi_magic2.py
|
2
|
10879
|
#!/usr/bin/env python
import sys
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
chi_magic.py
DESCRIPTION
plots magnetic susceptibility as a function of frequency and temperature and AC field
SYNTAX
chi_magic.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of FILE and temperature step
-f FILE, specify magic_measurements format file
-T IND, specify temperature step to plot
-e EXP, specify experiment name to plot
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-sav save figure and quit
DEFAULTS
FILE: magic_measurements.txt
IND: first
SPEC: step through one by one
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
meas_file = "magic_measurements.txt"
spec = ""
Tind, cont = 0, ""
EXP = ""
fmt = 'svg' # default image type for saving
plot = 0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
fname = input(
"Input magic_measurements file name? [magic_measurements.txt] ")
if fname != "":
meas_file = fname
if '-e' in sys.argv:
ind = sys.argv.index('-e')
EXP = sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-T' in sys.argv:
ind = sys.argv.index('-T')
Tind = int(sys.argv[ind+1])
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
#
meas_data, file_type = pmag.magic_read(meas_file)
#
# get list of unique experiment names
#
    # initialize some variables (a continuation flag, plot initialization flags, and the experiment counter)
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k+1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
if 'measurement_x' in rec.keys():
# backward compatibility
X.append(float(rec['measurement_x']))
else:
# data model 2.5
X.append(float(rec['measurement_chi_volume']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names)+1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum}) # make it visible
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
XTB = [] # initialize list if chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
plotnum += 1 # increment plot number
if '-i' in sys.argv:
for ind in range(len(Ts)): # print list of temperatures available
print(ind, int(Ts[ind]))
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == 'a':
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = key
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
cont = 'q'
while cont != "q":
if '-i' in sys.argv:
Tind = int(cont) # set temperature index
b = Bs[-1] # set field to max available
XF = [] # initial chi versus frequency list
for kk in range(len(X)): # hunt through the data
if T[kk] == Ts[Tind] and B[kk] == b: # if temperature and field match,
XF.append([X[kk], F[kk]]) # append the data
if len(XF) > 1: # if there are any data to plot
if FTinit == 0: # if not already initialized, initialize plot
# print 'initializing ',plotnum
pmagplotlib.plot_init(plotnum, 5, 5)
FTinit = 1
XFplot = plotnum
plotnum += 1 # increment plotnum
pmagplotlib.plot_xft(XFplot, XF, Ts[Tind], e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
                    '\n *** Skipping susceptibility-frequency plot as a function of temperature *** \n')
f = Fs[0] # set frequency to minimum available
XB = [] # initialize chi versus field list
for kk in range(len(X)): # hunt through the data
# if temperature and field match those desired
if T[kk] == Ts[Tind] and F[kk] == f:
XB.append([X[kk], B[kk]]) # append the data to list
if len(XB) > 4: # if there are any data
if BTinit == 0: # if plot not already initialized
pmagplotlib.plot_init(plotnum, 5, 5) # do it
BTinit = 1
# and call plotting function
pmagplotlib.plot_xbt(plotnum, XB, Ts[Tind], e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
                    'Skipping susceptibility - AC field plot as a function of temperature')
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = p
if '-i' in sys.argv:
# just in case you forgot, print out a new list of temperatures
for ind in range(len(Ts)):
print(ind, int(Ts[ind]))
# ask for new temp
cont = input(
"Enter index of next temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
if cont == 'a':
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
elif plot == 0:
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
pmagplotlib.save_plots(PLTS, files)
sys.exit()
else:
sys.exit()
else:
pmagplotlib.save_plots(PLTS, files)
sys.exit()
if __name__ == "__main__":
main()
|
bsd-3-clause
|
ibmsoe/tensorflow
|
tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py
|
75
|
29377
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
        after producing `num_batches` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
    # there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
|
apache-2.0
|
RyanChinSang/LeagueLatency
|
BETA/Test Code/GradLine/GradLine1.py
|
1
|
3112
|
import numpy as np
import matplotlib.pyplot as plt
def highResPoints(x,y,factor=10):
'''
Take points listed in two vectors and return them at a higher
    resolution. Create at least factor*len(x) new points that include the
original points and those spaced in between.
Returns new x and y arrays as a tuple (x,y).
'''
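    # Usage sketch: for two coordinate vectors x and y,
    #   xmod, ymod = highResPoints(x, y, factor=10)
    # returns densified copies with roughly factor*len(x) points.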
# r is the distance spanned between pairs of points
r = [0]
for i in range(1,len(x)):
dx = x[i]-x[i-1]
dy = y[i]-y[i-1]
r.append(np.sqrt(dx*dx+dy*dy))
r = np.array(r)
# rtot is a cumulative sum of r, it's used to save time
rtot = []
for i in range(len(r)):
rtot.append(r[0:i].sum())
rtot.append(r.sum())
    dr = rtot[-1]/(len(x)*factor-1)
xmod=[x[0]]
ymod=[y[0]]
rPos = 0 # current point on walk along data
rcount = 1
while rPos < r.sum():
x1,x2 = x[rcount-1],x[rcount]
y1,y2 = y[rcount-1],y[rcount]
dpos = rPos-rtot[rcount]
theta = np.arctan2((x2-x1),(y2-y1))
rx = np.sin(theta)*dpos+x1
ry = np.cos(theta)*dpos+y1
xmod.append(rx)
ymod.append(ry)
rPos+=dr
while rPos > rtot[rcount+1]:
rPos = rtot[rcount+1]
rcount+=1
if rcount>rtot[-1]:
break
return xmod,ymod
#CONSTANTS
NPOINTS = 10
COLOR='blue'
RESFACT=10
MAP='winter' # choose carefully, or color transitions will not appear smooth
# create random data
np.random.seed(101)
x = np.random.rand(NPOINTS)
y = np.random.rand(NPOINTS)
fig = plt.figure()
ax1 = fig.add_subplot(221) # regular resolution color map
ax2 = fig.add_subplot(222) # regular resolution alpha
ax3 = fig.add_subplot(223) # high resolution color map
ax4 = fig.add_subplot(224) # high resolution alpha
# Choose a color map, loop through the colors, and assign them to the color
# cycle. You need NPOINTS-1 colors, because you'll plot that many lines
# between pairs. In other words, your line is not cyclic, so there's
# no line from end to beginning
cm = plt.get_cmap(MAP)
ax1.set_color_cycle([cm(1.*i/(NPOINTS-1)) for i in range(NPOINTS-1)])
for i in range(NPOINTS-1):
ax1.plot(x[i:i+2],y[i:i+2])
ax1.text(.05,1.05,'Reg. Res - Color Map')
ax1.set_ylim(0,1.2)
# same approach, but fixed color and
# alpha is scale from 0 to 1 in NPOINTS steps
for i in range(NPOINTS-1):
ax2.plot(x[i:i+2],y[i:i+2],alpha=float(i)/(NPOINTS-1),color=COLOR)
ax2.text(.05,1.05,'Reg. Res - alpha')
ax2.set_ylim(0,1.2)
# get higher resolution data
xHiRes,yHiRes = highResPoints(x,y,RESFACT)
npointsHiRes = len(xHiRes)
cm = plt.get_cmap(MAP)
ax3.set_color_cycle([cm(1.*i/(npointsHiRes-1))
for i in range(npointsHiRes-1)])
for i in range(npointsHiRes-1):
ax3.plot(xHiRes[i:i+2],yHiRes[i:i+2])
ax3.text(.05,1.05,'Hi Res - Color Map')
ax3.set_ylim(0,1.2)
for i in range(npointsHiRes-1):
ax4.plot(xHiRes[i:i+2],yHiRes[i:i+2],
alpha=float(i)/(npointsHiRes-1),
color=COLOR)
ax4.text(.05,1.05,'High Res - alpha')
ax4.set_ylim(0,1.2)
# fig.savefig('gradColorLine.png')
plt.show()
|
gpl-3.0
|
numenta/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/scale.py
|
69
|
13414
|
import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
return SymmetricalLogScale.SymmetricalLogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
    Return a scale instance matching the given *scale* name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
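# --- Hedged usage sketch (not part of the original module) ------------------
# Shows how the 'symlog' entry registered in _scale_mapping above is reached
# through the public pyplot API; the basey/linthreshy keywords follow the
# SymmetricalLogScale.__init__ docstring. Guarded so importing this module is
# unaffected.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(-50.0, 50.0, 500)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, x ** 3)
    # set_yscale('symlog', ...) ends up in scale_factory(), which instantiates
    # SymmetricalLogScale with these keyword arguments.
    ax.set_yscale('symlog', basey=10, linthreshy=1.0)
    plt.show()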
|
agpl-3.0
|
kubeflow/kfp-tekton
|
components/XGBoost/Train/component.py
|
2
|
3460
|
from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_train(
training_data_path: InputPath('CSV'), # Also supports LibSVM
model_path: OutputPath('XGBoostModel'),
model_config_path: OutputPath('XGBoostModelConfig'),
starting_model_path: InputPath('XGBoostModel') = None,
label_column: int = 0,
num_iterations: int = 10,
booster_params: dict = None,
# Booster parameters
objective: str = 'reg:squarederror',
booster: str = 'gbtree',
learning_rate: float = 0.3,
min_split_loss: float = 0,
max_depth: int = 6,
):
'''Train an XGBoost model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
        num_iterations: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
'''
import pandas
import xgboost
df = pandas.read_csv(
training_data_path,
)
training_data = xgboost.DMatrix(
data=df.drop(columns=[df.columns[label_column]]),
label=df[df.columns[label_column]],
)
booster_params = booster_params or {}
booster_params.setdefault('objective', objective)
booster_params.setdefault('booster', booster)
booster_params.setdefault('learning_rate', learning_rate)
booster_params.setdefault('min_split_loss', min_split_loss)
booster_params.setdefault('max_depth', max_depth)
starting_model = None
if starting_model_path:
starting_model = xgboost.Booster(model_file=starting_model_path)
model = xgboost.train(
params=booster_params,
dtrain=training_data,
num_boost_round=num_iterations,
xgb_model=starting_model
)
# Saving the model in binary format
model.save_model(model_path)
model_config_str = model.save_config()
with open(model_config_path, 'w') as model_config_file:
model_config_file.write(model_config_str)
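# --- Hedged local-testing sketch (not part of the original component) -------
# The helper below only illustrates how xgboost_train() could be exercised
# outside of a Kubeflow pipeline: at call time the InputPath/OutputPath
# annotations are plain strings, so passing local file paths is enough.
# The CSV path and output locations are placeholders (assumptions), and the
# CSV is assumed to carry its label in column 0, matching label_column above.
def _local_smoke_test(
    training_csv: str = 'training_data.csv',
    model_out: str = '/tmp/model.bst',
    config_out: str = '/tmp/model_config.json',
):
    xgboost_train(
        training_data_path=training_csv,
        model_path=model_out,
        model_config_path=config_out,
        num_iterations=5,
    )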
if __name__ == '__main__':
create_component_from_func(
xgboost_train,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
]
)
|
apache-2.0
|
saiwing-yeung/scikit-learn
|
examples/linear_model/plot_lasso_and_elasticnet.py
|
73
|
2074
|
"""
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
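###############################################################################
# Optional follow-up (a hedged addition, not part of the original example):
# count the non-zero coefficients recovered by each model, since sparsity of
# the estimates is what the comparison above is about.
print("Non-zero coefficients -- true: %d, Lasso: %d, Elastic Net: %d"
      % (np.sum(coef != 0), np.sum(lasso.coef_ != 0), np.sum(enet.coef_ != 0)))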
|
bsd-3-clause
|
GeraldLoeffler/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/legend.py
|
69
|
30705
|
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
                 fancybox=None, # True: use a fancy rounded box; False: use a square box; None: take from rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
        # Take care of the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
            # use the deprecated value if it is not None and its newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
        if self.numpoints <= 0:
            raise ValueError("numpoints must be > 0; it was %d" % numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == expand, set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
        handle_list = []  # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (eg, Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
                # attribute. But other handle types do not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
        because the paddings, the handle size, etc. depend on the dpi
of the renderer.
"""
        # fontsize in pixels.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
                # we put some pad here to compensate for the size of the
                # marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
        Return the vertices and extents covered by the plot.
        Returns a three-element list ``[vertices, bboxes, lines]``:
        the first element is a list of (x, y) vertices (in
        display coordinates) covered by the lines and line
        collections, the second is a list of bounding boxes for the
        patches, and the third is a list of the transformed line paths.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, in display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
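# --- Hedged usage sketch (not part of the original module) ------------------
# Illustrates the public path that builds Legend instances: the loc codes and
# keywords documented in Legend.__init__ are passed through Axes.legend().
# Guarded so importing this module is unaffected.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4], label='quadratic')
    ax.plot([0, 1, 2], [0, 1, 2], label='linear')
    # 'upper left' maps to code 2 in Legend.codes; fancybox selects the
    # rounded FancyBboxPatch frame, and shadow adds the Shadow patch drawn
    # in Legend.draw().
    ax.legend(loc='upper left', fancybox=True, shadow=True)
    plt.show()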
|
agpl-3.0
|
evgchz/scikit-learn
|
sklearn/cross_validation.py
|
6
|
62788
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import _num_samples, check_array
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
    will allow some samples to occur several times in each split. However,
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None, n_bootstraps=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
if n_bootstraps is not None: # pragma: no cover
warnings.warn("n_bootstraps was renamed to n_iter and will "
"be removed in 0.16.", DeprecationWarning)
n_iter = n_bootstraps
self.n_iter = n_iter
if (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
elif isinstance(train_size, numbers.Integral):
self.train_size = train_size
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
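# Hedged sanity-check sketch (not part of the original module): illustrates
# how the helper above resolves fractional vs. integer sizes. Kept under a
# main guard so importing the module is unaffected.
if __name__ == "__main__":
    # 25% of 10 samples -> ceil(2.5) = 3 test samples, remainder for training.
    assert _validate_shuffle_split(10, test_size=0.25, train_size=None) == (7, 3)
    # Integer sizes are taken as absolute counts; floats as proportions.
    assert _validate_shuffle_split(10, test_size=2, train_size=0.5) == (5, 2)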
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(np.bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
##############################################################################
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
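# Minimal usage sketch for cross_val_predict, written doctest-style in a
# comment so nothing runs at import time; the estimator and dataset are
# illustrative choices, not part of this module:
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.linear_model import LogisticRegression
#   >>> iris = load_iris()
#   >>> preds = cross_val_predict(LogisticRegression(), iris.data, iris.target, cv=5)
#   >>> preds.shape
#   (150,)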
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
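# Doctest-style illustration of _check_is_partition: the indices must cover
# range(n) exactly once, in any order:
#
#   >>> _check_is_partition(np.array([2, 0, 1]), 3)
#   True
#   >>> _check_is_partition(np.array([0, 0, 1]), 3)
#   False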
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
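# Minimal usage sketch for cross_val_score (illustrative estimator and data,
# kept as a doctest-style comment so the module still imports cleanly):
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> iris = load_iris()
#   >>> scores = cross_val_score(SVC(), iris.data, iris.target, cv=5)
#   >>> scores.shape
#   (5,)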
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
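# Doctest-style illustration of the helper above (the toy X and y are
# assumptions for the example only):
#
#   >>> cv = _check_cv(5, X=np.zeros((10, 2)), y=np.array([0, 1] * 5),
#   ...                classifier=True)
#   >>> isinstance(cv, StratifiedKFold)
#   True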
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
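# Minimal usage sketch for permutation_test_score (illustrative estimator and
# data; with informative features the returned p-value should be small):
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> iris = load_iris()
#   >>> score, perm_scores, pvalue = permutation_test_score(
#   ...     SVC(), iris.data, iris.target, cv=5, n_permutations=30)
#   >>> perm_scores.shape
#   (30,)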
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.")
force_arrays = options.pop('force_arrays', False)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if force_arrays:
warnings.warn("The force_arrays option is deprecated and will be "
"removed in 0.18.", DeprecationWarning)
arrays = [check_array(x, 'csr', ensure_2d=False,
force_all_finite=False) if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
|
bsd-3-clause
|
jhnnsnk/nest-simulator
|
pynest/examples/correlospinmatrix_detector_two_neuron.py
|
12
|
2587
|
# -*- coding: utf-8 -*-
#
# correlospinmatrix_detector_two_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Correlospinmatrix detector example
----------------------------------------
This script simulates two connected binary neurons, similar to the setup
in [1]_. It measures and plots the auto- and cross-covariance functions
of the individual neurons and between them, respectively.
References
~~~~~~~~~~~~
.. [1] Ginzburg and Sompolinsky (1994). Theory of correlations in stochastic neural networks. 50(4) p. 3175. Fig. 1.
"""
import matplotlib.pyplot as plt
import nest
import numpy as np
m_x = 0.5
tau_m = 10.
h = 0.1
T = 1000000.
tau_max = 100.
csd = nest.Create("correlospinmatrix_detector")
csd.set(N_channels=2, tau_max=tau_max, Tstart=tau_max, delta_tau=h)
nest.SetDefaults('ginzburg_neuron', {'theta': 0.0, 'tau_m': tau_m,
'c_1': 0.0, 'c_2': 2. * m_x, 'c_3': 1.0})
n1 = nest.Create("ginzburg_neuron")
nest.SetDefaults("mcculloch_pitts_neuron", {'theta': 0.5, 'tau_m': tau_m})
n2 = nest.Create("mcculloch_pitts_neuron")
nest.Connect(n1, n2, syn_spec={"weight": 1.0})
nest.Connect(n1, csd, syn_spec={"receptor_type": 0})
nest.Connect(n2, csd, syn_spec={"receptor_type": 1})
nest.Simulate(T)
c = csd.get("count_covariance")
m = np.zeros(2, dtype=float)
for i in range(2):
m[i] = c[i][i][int(tau_max / h)] * (h / T)
print('mean activities =', m)
cmat = np.zeros((2, 2, int(2 * tau_max / h) + 1), dtype=float)
for i in range(2):
for j in range(2):
cmat[i, j] = c[i][j] * (h / T) - m[i] * m[j]
ts = np.arange(-tau_max, tau_max + h, h)
plt.title("auto- and cross covariance functions")
plt.plot(ts, cmat[0, 1], 'r', label=r"$c_{12}$")
plt.plot(ts, cmat[1, 0], 'b', label=r"$c_{21}$")
plt.plot(ts, cmat[0, 0], 'g', label=r"$c_{11}$")
plt.plot(ts, cmat[1, 1], 'y', label=r"$c_{22}$")
plt.xlabel(r"time $t \; \mathrm{ms}$")
plt.ylabel(r"$c$")
plt.legend()
plt.show()
|
gpl-2.0
|
cython-testbed/pandas
|
pandas/tests/scalar/timestamp/test_timezones.py
|
1
|
12514
|
# -*- coding: utf-8 -*-
"""
Tests for Timestamp timezone-related methods
"""
from datetime import datetime, date, timedelta
from distutils.version import LooseVersion
import pytest
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
import dateutil
from dateutil.tz import gettz, tzoffset
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import Timestamp, NaT
from pandas.errors import OutOfBoundsDatetime
class TestTimestampTZOperations(object):
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
pac = Timestamp.min.tz_localize('US/Pacific')
assert pac.value > Timestamp.min.value
pac.tz_convert('Asia/Tokyo') # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime):
Timestamp.min.tz_localize('Asia/Tokyo')
# tz_localize that pushes away from the boundary is OK
tokyo = Timestamp.max.tz_localize('Asia/Tokyo')
assert tokyo.value < Timestamp.max.value
tokyo.tz_convert('US/Pacific') # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime):
Timestamp.max.tz_localize('US/Pacific')
def test_tz_localize_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# GH#14402
ts = Timestamp('2015-11-01 01:00:03')
expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
with pytest.raises(pytz.AmbiguousTimeError):
ts.tz_localize('US/Central')
result = ts.tz_localize('US/Central', ambiguous=True)
assert result == expected0
result = ts.tz_localize('US/Central', ambiguous=False)
assert result == expected1
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
with pytest.raises(ValueError):
ts.tz_localize('US/Eastern', ambiguous='infer')
# GH#8025
with tm.assert_raises_regex(TypeError,
'Cannot localize tz-aware Timestamp, '
'use tz_convert for conversions'):
Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assert_raises_regex(TypeError,
'Cannot convert tz-naive Timestamp, '
'use tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
@pytest.mark.parametrize('stamp, tz', [
('2015-03-08 02:00', 'US/Eastern'),
('2015-03-08 02:30', 'US/Pacific'),
('2015-03-29 02:00', 'Europe/Paris'),
('2015-03-29 02:30', 'Europe/Belgrade')])
def test_tz_localize_nonexistent(self, stamp, tz):
# GH#13057
ts = Timestamp(stamp)
with pytest.raises(NonExistentTimeError):
ts.tz_localize(tz)
with pytest.raises(NonExistentTimeError):
ts.tz_localize(tz, errors='raise')
assert ts.tz_localize(tz, errors='coerce') is NaT
def test_tz_localize_errors_ambiguous(self):
# GH#13057
ts = Timestamp('2015-11-1 01:00')
with pytest.raises(AmbiguousTimeError):
ts.tz_localize('US/Pacific', errors='coerce')
@pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00'])
def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp)
localized = ts.tz_localize(tz)
assert localized == Timestamp(stamp, tz=tz)
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset == ts
assert reset.tzinfo is None
def test_tz_localize_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
naive = Timestamp('2013-10-27 01:00:00')
pytz_zone = 'Europe/London'
dateutil_zone = 'dateutil/Europe/London'
result_pytz = naive.tz_localize(pytz_zone, ambiguous=0)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# dateutil 2.6 buggy w.r.t. ambiguous=0
# see gh-14621
# see https://github.com/dateutil/dateutil/issues/321
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
assert str(result_pytz) == str(result_dateutil)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert result_pytz.to_pydatetime().tzname() == 'GMT'
assert result_dateutil.to_pydatetime().tzname() == 'BST'
assert str(result_pytz) != str(result_dateutil)
# 1 hour difference
result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1)
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'):
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_tz_localize(self, tz):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(tz)
expected = Timestamp('3/11/2012 04:00', tz=tz)
assert result.hour == expected.hour
assert result == expected
# ------------------------------------------------------------------
# Timestamp.tz_convert
@pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00'])
def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture):
tz = tz_aware_fixture
ts = Timestamp(stamp, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
assert reset == Timestamp(stamp)
assert reset.tzinfo is None
assert reset == converted.tz_convert('UTC').tz_localize(None)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_astimezone(self, tzstr):
# astimezone is an alias for tz_convert, so keep it with
# the tz_convert tests
utcdate = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utcdate.tz_convert(tzstr)
result = utcdate.astimezone(tzstr)
assert expected == result
assert isinstance(result, Timestamp)
@td.skip_if_windows
def test_tz_convert_utc_with_system_utc(self):
from pandas._libs.tslibs.timezones import maybe_get_tz
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# ------------------------------------------------------------------
# Timestamp.__init__ with tz str or tzinfo
def test_timestamp_constructor_tz_utc(self):
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
assert utc_stamp.tzinfo is pytz.utc
assert utc_stamp.hour == 5
utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
assert utc_stamp.hour == 5
def test_timestamp_to_datetime_tzoffset(self):
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
assert expected == result
def test_timestamp_constructor_near_dst_boundary(self):
# GH#11481 & GH#15777
# Naive string timestamps were being localized incorrectly
# with tz_convert_single instead of tz_localize_to_utc
for tz in ['Europe/Brussels', 'Europe/Prague']:
result = Timestamp('2015-10-25 01:00', tz=tz)
expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
assert result == expected
with pytest.raises(pytz.AmbiguousTimeError):
Timestamp('2015-10-25 02:00', tz=tz)
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
# GH#11708
naive = Timestamp('2015-11-18 10:00:00')
result = naive.tz_localize('UTC').tz_convert('Asia/Kolkata')
expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
assert result == expected
# GH#15823
result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
naive = Timestamp(result.value)
expected = naive.tz_localize('UTC').tz_convert('Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
assert result == expected
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_constructed_by_date_and_tz(self, tz):
# GH#2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=tz)
expected = Timestamp('3/11/2012', tz=tz)
assert result.hour == expected.hour
assert result == expected
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern'),
'US/Eastern', 'dateutil/US/Eastern'])
def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
# GH#1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=tz)
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=tz)
assert result == expected
def test_timestamp_timetz_equivalent_with_datetime_tz(self,
tz_naive_fixture):
# GH21358
if tz_naive_fixture is not None:
tz = dateutil.tz.gettz(tz_naive_fixture)
else:
tz = None
stamp = Timestamp('2018-06-04 10:20:30', tz=tz)
_datetime = datetime(2018, 6, 4, hour=10,
minute=20, second=30, tzinfo=tz)
result = stamp.timetz()
expected = _datetime.timetz()
assert result == expected
|
bsd-3-clause
|
mschwarzer/citolytics-docker
|
evaluation/eventlogging.py
|
1
|
9079
|
"""
Data extraction for event logging evaluation
tables: MobileWikiAppArticleSuggestions (), MobileWikiAppArticleSuggestions_items, MobileWikiAppPageScroll, MobileWikiAppSessions
extract page ids (already in mediawiki table)
## long CTR
### details
SELECT i.appInstallID, i.pageTitle, i.readMoreItem, i.clicked, s.maxPercentViewed, s.timeSpent, i.timestamp, TIMEDIFF(s.timestamp, i.timestamp)
FROM MobileWikiAppArticleSuggestions_items i
JOIN page p ON p.page_title = REPLACE( i.readMoreItem, " ", "_")
JOIN MobileWikiAppPageScroll s
ON s.pageID = p.page_id
AND s.appInstallID = i.appInstallID
AND s.timestamp > i.timestamp
AND TIMEDIFF(s.timestamp, i.timestamp) < 600;
### summary (min timespent 10)
SELECT y.readMoreSource, SUM(y.clicked) as longClicks, AVG(y.timeSpent) as avgTimeSpent, AVG(y.maxPercentViewed) as avgMaxPercentViewed
FROM (
SELECT i.readMoreSource, i.pageTitle, i.clicked, x.*, COUNT(*), TIMEDIFF(x.timestamp, i.timestamp)
FROM MobileWikiAppArticleSuggestions_items i
JOIN (
SELECT p.page_title, s.appInstallID, s.timestamp, s.timeSpent, s.maxPercentViewed
FROM page p
JOIN MobileWikiAppPageScroll s
ON s.pageID = p.page_id
GROUP BY p.page_id, s.appInstallID, s.timestamp
) x
ON x.page_title = REPLACE( i.readMoreItem, " ", "_")
AND x.appInstallID = i.appInstallID
WHERE i.clicked = 1
AND x.timestamp > i.timestamp
AND TIMEDIFF(x.timestamp, i.timestamp) < 600
AND timeSpent > 10
GROUP BY i.appInstallID, i.timestamp
) y
GROUP BY y.readMoreSource;
### old
SELECT i.readMoreSource, SUM(i.clicked) as longClicks, AVG(s.timeSpent) as avgTimeSpent, AVG(s.maxPercentViewed) as avgMaxPercentViewed
FROM MobileWikiAppArticleSuggestions_items i
LEFT JOIN page p
ON p.page_title = REPLACE( i.readMoreItem, " ", "_")
LEFT JOIN MobileWikiAppPageScroll s
ON s.pageID = p.page_id
AND s.appInstallID = i.appInstallID
AND s.timestamp > i.timestamp
AND TIMEDIFF(s.timestamp, i.timestamp) < 600
WHERE i.clicked = 1
GROUP BY i.readMoreSource;
SELECT i.readMoreSource, SUM(i.clicked) as longClicks, COUNT(*)
FROM MobileWikiAppArticleSuggestions_items i
WHERE i.clicked = 1
GROUP BY i.readMoreSource;
## sessions stats
### group 10min sessions per user
SELECT x.readMoreSource, COUNT(*) as sessionCount, SUM(x.totalPages) as totalPages, AVG(x.totalPages) as avgTotalPages,
AVG(x.length) as avgLength, AVG(x.fromSearch) as avgFromSearch, AVG(x.fromRandom) as avgFromRandom, AVG(x.fromInternal) as avgFromInternal, AVG(x.fromBack) as avgFromBack
FROM (
SELECT i.readMoreSource, i.appInstallID, s.length, s.totalPages, s.fromSearch, s.fromRandom, s.fromInternal, s.fromBack,
s.timestamp, ROUND(TIMEDIFF(NOW(), s.timestamp) / 600) as sessionTime
FROM MobileWikiAppSessions s
JOIN MobileWikiAppArticleSuggestions i
ON i.appInstallID = s.appInstallID
GROUP BY ROUND(TIMEDIFF(NOW(), s.timestamp) / 600)
ORDER BY s.appInstallID, s.timestamp
) as x
GROUP BY x.readMoreSource;
"""
import numpy as np
import pandas as pd
import MySQLdb
#from tabulate import tabulate
from _mysql_exceptions import OperationalError
read_more_sources = {
1: 'MLT', # or 2
2: 'Citolytics'
}
read_more_source_mlt = 1 # or 2
read_more_source_citolytics = 3
class ELEvaluation(object):
def __init__(self, db_host, db_user, db_password, db_name):
try:
self.db = MySQLdb.connect(host=db_host, # your host, usually localhost
user=db_user, # your username
passwd=db_password, # your password
db=db_name)
self.cur = self.db.cursor(MySQLdb.cursors.DictCursor)
except OperationalError:
print('Error: Cannot connect to MySQL server')
exit(1)
def set_read_more_source_label(self, df):
df['readMoreSource'] = df.apply(lambda r: read_more_sources[int(r['readMoreSource'])], axis=1)
return df
def get_event_time_series(self, table='MobileWikiAppArticleSuggestions'):
sql = 'SELECT `timestamp`, COUNT(*) as `count`' \
+ ' FROM ' + table \
+ ' GROUP BY DATE_FORMAT(`timestamp`, "%Y-%m-%d %H:00:00") ORDER BY `timestamp`'
self.cur.execute(sql)
datetimes = []
counts = []
for r in self.cur.fetchall():
#print(r)
datetimes.append(r['timestamp'])
counts.append([r['count']])
return datetimes, counts
def get_most_recommended_items(self, limit=10):
return pd.read_sql('SELECT readMoreItem, COUNT(*) as views FROM MobileWikiAppArticleSuggestions_items WHERE clicked = 0 GROUP BY readMoreItem ORDER BY COUNT(*) DESC LIMIT %i' % limit, con=self.db)
def get_most_clicked_items(self, limit=10):
return pd.read_sql('SELECT readMoreItem, COUNT(*) as clicks FROM MobileWikiAppArticleSuggestions_items WHERE clicked = 1 GROUP BY readMoreItem ORDER BY COUNT(*) DESC LIMIT %i' % limit, con=self.db)
def get_stats_per_source(self):
# views divided by 3?
df = pd.read_sql('SELECT readMoreSource, SUM(clicked) as clicks, COUNT(*) as views, SUM(clicked) / COUNT(*) as ctr ' \
' FROM MobileWikiAppArticleSuggestions_items' \
' GROUP BY readMoreSource', con=self.db)
return self.set_read_more_source_label(df)
def get_long_stats_per_source(self, minTimeSpent=10, minPercentViewed=50):
# join with views?
sql = 'SELECT y.readMoreSource, SUM(y.clicked) as longClicks, AVG(y.timeSpent) as avgTimeSpent, AVG(y.maxPercentViewed) as avgMaxPercentViewed' \
' FROM (' \
' SELECT i.readMoreSource, i.pageTitle, i.clicked, x.*, COUNT(*), TIMEDIFF(x.timestamp, i.timestamp)' \
' FROM MobileWikiAppArticleSuggestions_items i' \
' JOIN (' \
' SELECT p.page_title, s.appInstallID, s.timestamp, s.timeSpent, s.maxPercentViewed' \
' FROM page p' \
' JOIN MobileWikiAppPageScroll s' \
' ON s.pageID = p.page_id' \
' GROUP BY p.page_id, s.appInstallID, s.timestamp' \
' ) x' \
' ON x.page_title = REPLACE( i.readMoreItem, " ", "_")' \
' AND x.appInstallID = i.appInstallID' \
' WHERE i.clicked = 1' \
' AND x.timestamp > i.timestamp' \
' AND TIMEDIFF(x.timestamp, i.timestamp) < 600'
if minTimeSpent > 0:
sql += ' AND timeSpent > %i' % minTimeSpent
if minPercentViewed > 0:
sql += ' AND maxPercentViewed > %i' % minPercentViewed
sql += ' GROUP BY i.appInstallID, i.timestamp' \
' ) y' \
' GROUP BY y.readMoreSource'
df = pd.read_sql(sql, con=self.db)
return self.set_read_more_source_label(df)
def get_session_stats(self):
        return pd.read_sql('SELECT COUNT(*), SUM(length), SUM(totalPages), SUM(fromRandom), SUM(fromSearch), AVG(length), AVG(totalPages), AVG(fromRandom), AVG(fromSearch) FROM MobileWikiAppSessions', con=self.db)
def get_session_stats_per_source(self):
sql = 'SELECT x.readMoreSource, COUNT(*) as sessionCount, SUM(x.totalPages) as totalPages, AVG(x.totalPages) as avgTotalPages,' \
' AVG(x.length) as avgLength, AVG(x.fromSearch) as avgFromSearch, AVG(x.fromRandom) as avgFromRandom, AVG(x.fromInternal) as avgFromInternal, AVG(x.fromBack) as avgFromBack' \
' FROM (' \
' SELECT i.readMoreSource, i.appInstallID, s.length, s.totalPages, s.fromSearch, s.fromRandom, s.fromInternal, s.fromBack,' \
' s.timestamp, ROUND(TIMEDIFF(NOW(), s.timestamp) / 600) as sessionTime' \
' FROM MobileWikiAppSessions s' \
' JOIN MobileWikiAppArticleSuggestions i' \
' ON i.appInstallID = s.appInstallID' \
' GROUP BY ROUND(TIMEDIFF(NOW(), s.timestamp) / 600)' \
' ORDER BY s.appInstallID, s.timestamp' \
' ) as x' \
' GROUP BY x.readMoreSource'
df = pd.read_sql(sql, con=self.db)
return self.set_read_more_source_label(df)
def get_page_scroll_stats(self):
return pd.read_sql('SELECT COUNT(*), AVG(maxPercentViewed), AVG(timeSpent), SUM(timeSpent) FROM MobileWikiAppPageScroll', con=self.db)
def get_metric_stats(self):
sql = "SELECT COUNT(*) as events FROM MobileWikiAppArticleSuggestions"
self.cur.execute(sql)
events = self.cur.fetchall()[0]['events']
sql = "SELECT COUNT(DISTINCT appInstallID) as users FROM MobileWikiAppArticleSuggestions"
self.cur.execute(sql)
users = self.cur.fetchall()[0]['users']
#print('Users collected: %i' % )
df = pd.DataFrame.from_dict({
'_metric': [
'users',
'suggestions'
],
'count': [
users,
events
]
})
return df
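# Hedged usage sketch -- the connection parameters below are placeholders for
# a local EventLogging MySQL database, not real credentials:
#
# if __name__ == '__main__':
#     ev = ELEvaluation('localhost', 'research', 'secret', 'log')
#     print(ev.get_stats_per_source())
#     print(ev.get_long_stats_per_source(minTimeSpent=10, minPercentViewed=50))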
|
mit
|
q1ang/scikit-learn
|
sklearn/tests/test_discriminant_analysis.py
|
35
|
11709
|
try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (run to make
    # sure no failure is raised)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
    # Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
|
bsd-3-clause
|
wlamond/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
93
|
1562
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
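# Sanity check (illustrative, doctest-style): the kernel above is the plain dot
# product after scaling the first feature by sqrt(2):
#
#   >>> A = np.array([[1., 2.], [3., 4.]])
#   >>> s = np.array([np.sqrt(2.), 1.])
#   >>> np.allclose(my_kernel(A, A), np.dot(A * s, (A * s).T))
#   True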
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
glorizen/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py
|
73
|
7068
|
import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart, ystop, ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
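# Doctest-style illustration: the y-slice comes first in the key, the x-slice
# second, and the imaginary step gives the number of samples (as with np.mgrid):
#
#   >>> slice2gridspec((slice(0, 1, 5j), slice(0, 2, 3j)))
#   (0, 2, 3, 0, 1, 5)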
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
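# A hedged usage sketch (assumes the companion Triangulation class from
# matplotlib.delaunay, which this module does not import itself).
def _demo_linear_interpolation():
    from matplotlib.delaunay import Triangulation    # assumed import path
    x = np.random.random(100)
    y = np.random.random(100)
    z = np.sin(4 * x) * np.cos(4 * y)
    interp = LinearInterpolator(Triangulation(x, y), z, default_value=np.nan)
    # 50x50 regular grid over [0, 1] x [0, 1]; slices follow the numpy.mgrid convention
    return interp[0.0:1.0:50j, 0.0:1.0:50j]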
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
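# A hedged usage sketch of the natural-neighbors interpolator (same assumed
# Triangulation class as in the sketch above).
def _demo_nn_interpolation():
    from matplotlib.delaunay import Triangulation    # assumed import path
    x = np.random.random(100)
    y = np.random.random(100)
    z = x * y
    interp = NNInterpolator(Triangulation(x, y), z)
    grid = interp[0.0:1.0:25j, 0.0:1.0:25j]                  # regular-grid evaluation
    scattered = interp(np.array([0.5]), np.array([0.25]))    # unstructured query points
    return grid, scattered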
|
agpl-3.0
|
BiaDarkia/scikit-learn
|
sklearn/neural_network/rbm.py
|
17
|
12274
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
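# A minimal usage sketch (not part of this module): the usual pairing of RBM
# features with a linear classifier; assumes X is scaled to [0, 1].
def _demo_rbm_pipeline(X, y):
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import LogisticRegression
    rbm = BernoulliRBM(n_components=64, learning_rate=0.05, n_iter=20,
                       random_state=0)
    clf = Pipeline([('rbm', rbm), ('logistic', LogisticRegression())])
    clf.fit(X, y)    # hidden-unit activations from the RBM feed the classifier
    return clf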
|
bsd-3-clause
|
JohanComparat/nbody-npt-functions
|
bin/bin_DF/test_scripts/DF_Mvir_otherProjection.py
|
1
|
2404
|
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import fortranfile
import glob
import cPickle
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
# loads the density field
# list of parameters:
#DFdir = join("..", "MDPL")
DFfile = join("dmdens_cic_087.dat")
Halofile = join("hlist_0.40320_PM.DF.fits")
Lbox = 1000.
grid = 2048
qty = 'mvir'
dx = Lbox/grid
NNs =[8, 16, 32, 64 ]
massRanges=[[11,12],[12,13],[13,14]]
for NN in NNs:
for massRange in massRanges:
print NN, massRange
massMin = massRange[0]
massMax = massRange[1]
dxN = NN * dx
allSlices = n.array(glob.glob( "/users/jcomparat/skies_universes_production/MultiDark/onePtFct/slice"+str(NN)+"/occupationDATA_raw/*"+str(massMin)+".M."+str(massMax)+".pkl"))
for jjj, path_to_slice in enumerate (allSlices):
path_to_plot = path_to_slice.replace("occupationDATA_raw", "meanDelta_plot")[:-4]+".png"
path_to_plot_d = path_to_slice.replace("occupationDATA_raw", "distrDelta_plot")[:-4]+".png"
f=open(path_to_slice, 'r')
DF_rs,NH = cPickle.load(f)
f.close()
#figure halos
binsNH = n.arange(-0.5,n.max(NH)+1,1)
xNH = (binsNH[1:]+binsNH[:-1])/2.
deltaMass = n.zeros(len(binsNH[:-1]))
deltaMassSTD = n.zeros(len(binsNH[:-1]))
#compute occupation
for numBin, bin in enumerate(binsNH[:-1]):
ids = n.where( (NH>=binsNH[numBin])&(NH<binsNH[numBin+1]) )
print bin, len(DF_rs[ids])
deltaMass[numBin] = n.mean(DF_rs[ids], axis=0)
deltaMassSTD[numBin] = n.std(DF_rs[ids], axis=0)
# figure occupation
p.figure(1)
p.plot(xNH, deltaMass, 'b')
p.plot(xNH, deltaMass-deltaMassSTD, 'b--')
p.plot(xNH, deltaMass+deltaMassSTD, 'b--')
p.xlabel('N halo per cell in '+str(massMin)+".M."+str(massMax))
p.ylabel('mean value of DF')
p.title("cell="+str(dxN)+"Mpc/h")
p.grid()
p.savefig(path_to_plot)
p.clf()
p.figure(1)
for numBin, bin in enumerate(binsNH[:-1]):
ids = n.where( (NH>=binsNH[numBin])&(NH<binsNH[numBin+1]) )
#print numBin, len(DF_rs[ids]), DF_rs[ids]
if len(DF_rs[ids])>10 :
print
p.hist(DF_rs[ids], normed=True, label=str(xNH[numBin]), histtype='step', bins=20)
p.legend(loc=0, fontsize=10)
p.xlabel('log(1+delta)')
p.ylabel('normed histogram')
p.title("cell="+str(dxN)+"Mpc/h")
p.grid()
p.savefig(path_to_plot_d)
p.clf()
|
cc0-1.0
|
WindCanDie/spark
|
python/pyspark/sql/tests/test_pandas_udf_window.py
|
7
|
12906
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.utils import AnalysisException
from pyspark.sql.functions import array, explode, col, lit, mean, min, max, rank, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.window import Window
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class WindowPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
return udf(lambda v: v + 1, 'double')
@property
def pandas_scalar_time_two(self):
return pandas_udf(lambda v: v * 2, 'double')
@property
def pandas_agg_count_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('long', PandasUDFType.GROUPED_AGG)
def count(v):
return len(v)
return count
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_max_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max(v):
return v.max()
return max
@property
def pandas_agg_min_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def min(v):
return v.min()
return min
@property
def unbounded_window(self):
return Window.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing).orderBy('v')
@property
def ordered_window(self):
return Window.partitionBy('id').orderBy('v')
@property
def unpartitioned_window(self):
return Window.partitionBy()
@property
def sliding_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, 1)
@property
def sliding_range_window(self):
return Window.partitionBy('id').orderBy('v').rangeBetween(-2, 4)
@property
def growing_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(Window.unboundedPreceding, 3)
@property
def growing_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(Window.unboundedPreceding, 4)
@property
def shrinking_row_window(self):
return Window.partitionBy('id').orderBy('v').rowsBetween(-2, Window.unboundedFollowing)
@property
def shrinking_range_window(self):
return Window.partitionBy('id').orderBy('v') \
.rangeBetween(-3, Window.unboundedFollowing)
def test_simple(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_multiple_udfs(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('mean_v', self.pandas_agg_mean_udf(df['v']).over(w)) \
.withColumn('max_v', self.pandas_agg_max_udf(df['v']).over(w)) \
.withColumn('min_w', self.pandas_agg_min_udf(df['w']).over(w))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w)) \
.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('min_w', min(df['w']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_replace_existing(self):
df = self.data
w = self.unbounded_window
result1 = df.withColumn('v', self.pandas_agg_mean_udf(df['v']).over(w))
expected1 = df.withColumn('v', mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
df = self.data
w = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v', mean_udf(df['v'] * 2).over(w) + 1)
expected1 = df.withColumn('v', mean(df['v'] * 2).over(w) + 1)
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_udf(self):
df = self.data
w = self.unbounded_window
plus_one = self.python_plus_one
time_two = self.pandas_scalar_time_two
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn(
'v2',
plus_one(mean_udf(plus_one(df['v'])).over(w)))
expected1 = df.withColumn(
'v2',
plus_one(mean(plus_one(df['v'])).over(w)))
result2 = df.withColumn(
'v2',
time_two(mean_udf(time_two(df['v'])).over(w)))
expected2 = df.withColumn(
'v2',
time_two(mean(time_two(df['v'])).over(w)))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_without_partitionBy(self):
df = self.data
w = self.unpartitioned_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('v2', mean_udf(df['v']).over(w))
expected1 = df.withColumn('v2', mean(df['v']).over(w))
result2 = df.select(mean_udf(df['v']).over(w))
expected2 = df.select(mean(df['v']).over(w))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
def test_mixed_sql_and_udf(self):
df = self.data
w = self.unbounded_window
ow = self.ordered_window
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min_udf(df['v']).over(w))
expected1 = df.withColumn('v_diff', max(df['v']).over(w) - min(df['v']).over(w))
# Test mixing sql window function and window udf in the same expression
result2 = df.withColumn('v_diff', max_udf(df['v']).over(w) - min(df['v']).over(w))
expected2 = expected1
# Test chaining sql aggregate function and udf
result3 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('min_v', min(df['v']).over(w)) \
.withColumn('v_diff', col('max_v') - col('min_v')) \
.drop('max_v', 'min_v')
expected3 = expected1
# Test mixing sql window function and udf
result4 = df.withColumn('max_v', max_udf(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
expected4 = df.withColumn('max_v', max(df['v']).over(w)) \
.withColumn('rank', rank().over(ow))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_array_type(self):
df = self.data
w = self.unbounded_window
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.withColumn('v2', array_udf(df['v']).over(w))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
w = self.unbounded_window
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'.*not supported within a window function'):
foo_udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
df.withColumn('v2', foo_udf(df['v']).over(w))
def test_bounded_simple(self):
from pyspark.sql.functions import mean, max, min, count
df = self.data
w1 = self.sliding_row_window
w2 = self.shrinking_range_window
plus_one = self.python_plus_one
count_udf = self.pandas_agg_count_udf
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
min_udf = self.pandas_agg_min_udf
result1 = df.withColumn('mean_v', mean_udf(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count_udf(df['v']).over(w2)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('min_v', min_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(plus_one(df['v'])).over(w1)) \
.withColumn('count_v', count(df['v']).over(w2)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('min_v', min(df['v']).over(w1))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_growing_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.growing_row_window
w2 = self.growing_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_sliding_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.sliding_row_window
w2 = self.sliding_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_shrinking_window(self):
from pyspark.sql.functions import mean
df = self.data
w1 = self.shrinking_row_window
w2 = self.shrinking_range_window
mean_udf = self.pandas_agg_mean_udf
result1 = df.withColumn('m1', mean_udf(df['v']).over(w1)) \
.withColumn('m2', mean_udf(df['v']).over(w2))
expected1 = df.withColumn('m1', mean(df['v']).over(w1)) \
.withColumn('m2', mean(df['v']).over(w2))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_bounded_mixed(self):
from pyspark.sql.functions import mean, max
df = self.data
w1 = self.sliding_row_window
w2 = self.unbounded_window
mean_udf = self.pandas_agg_mean_udf
max_udf = self.pandas_agg_max_udf
result1 = df.withColumn('mean_v', mean_udf(df['v']).over(w1)) \
.withColumn('max_v', max_udf(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean_udf(df['v']).over(w1))
expected1 = df.withColumn('mean_v', mean(df['v']).over(w1)) \
.withColumn('max_v', max(df['v']).over(w2)) \
.withColumn('mean_unbounded_v', mean(df['v']).over(w1))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_window import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
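# A minimal standalone sketch of what the bounded-window tests above exercise:
# a grouped-aggregate pandas UDF evaluated over a sliding row window
# (assumes an active SparkSession passed in as `spark`).
def _demo_bounded_window_udf(spark):
    from pyspark.sql.functions import pandas_udf, PandasUDFType, col
    from pyspark.sql.window import Window
    @pandas_udf('double', PandasUDFType.GROUPED_AGG)
    def mean_udf(v):
        return v.mean()
    df = spark.range(10).withColumn('v', col('id') * 1.0)
    w = Window.partitionBy().orderBy('v').rowsBetween(-2, 1)    # sliding row window
    return df.withColumn('rolling_mean', mean_udf(df['v']).over(w))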
|
apache-2.0
|
icbicket/SpectrumImageAnalysisPy
|
src/DeconvolutionExperiment.py
|
1
|
6710
|
from __future__ import division
import SpectrumImage
import Spectrum
import SpectrumImagePlotter
import SpectrumPlotter
import Image
import collections
import ImagePlotter
import os
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting, powerlaws
import numpy as np
from sklearn import datasets, linear_model
filefolder = '/home/isobel/Documents/McMaster/EELS/2017-03-31 - inverse Sierpinskis/SiN3'
filename = 'EELS Spectrum Image (dark ref corrected).dm3'
SI = SpectrumImage.EELSSpectrumImage.LoadFromDM3(os.path.join(filefolder, filename))
SI_norm = SpectrumImage.EELSSpectrumImage(SI.Normalize())
P1 = SpectrumImagePlotter.SpectrumImagePlotter(SI)
P2 = SpectrumImagePlotter.SpectrumImagePlotter(SI_norm)
FWHM = Image.Image(SI.FindFW(0.5))
IP1 = ImagePlotter.ImagePlotter(FWHM)
plt.figure()
FWHM_line = SI.FindFW(0.5).flatten()
plt.plot(FWHM_line)
plt.figure()
FWHM_fft = np.fft.fft(FWHM_line)
plt.plot(FWHM_fft)
PSF = Spectrum.EELSSpectrum(np.average(np.average(SI.data, axis=0), axis=0))
SP1 = SpectrumPlotter.SpectrumManager(PSF, cmap=plt.get_cmap('nipy_spectral'))
L = models.Lorentz1D(amplitude=np.max(PSF.intensity)/2., x_0=0, fwhm=0.005)
G = models.Gaussian1D(amplitude=np.max(PSF.intensity), mean=0., stddev=0.005)
Lrange = np.arange(-20, 20, 0.005)
Lorentzmodel = Spectrum.EELSSpectrum(L(Lrange), SpectrumRange=Lrange)
Gaussmodel = Spectrum.EELSSpectrum(G(Lrange), SpectrumRange=Lrange)
SP1.update_spectrum(Lorentzmodel, 'Lorentz_model')
SP1.update_spectrum(Gaussmodel, 'Gauss_model')
LConvolved = np.convolve(L(Lrange)[::-1], PSF.intensity, 'same')
GConvolved = np.convolve(G(Lrange)[::-1], PSF.intensity, 'same')
minE = np.min(PSF.SpectrumRange)
maxE = np.max(PSF.SpectrumRange)
LConvolved = LConvolved/np.max(LConvolved) * np.max(PSF.intensity)
GConvolved = GConvolved/np.max(GConvolved) * np.max(PSF.intensity)
PSF_Lfat = Spectrum.EELSSpectrum(LConvolved)
PSF_Gfat = Spectrum.EELSSpectrum(GConvolved)
minI = PSF_Lfat.ZLP + (minE/PSF.dispersion)
maxI = PSF_Lfat.ZLP + (maxE/PSF.dispersion)+1
SP1.update_spectrum(PSF_Lfat, 'Lorentz fat')
SP1.update_spectrum(PSF_Gfat, 'Gauss fat')
FWHM_L = PSF_Lfat.FindFW(0.5)
FWHM_G = PSF_Gfat.FindFW(0.5)
upperlim = FWHM_line[FWHM_line<0.06935]
argminL = (np.abs(FWHM.data.flatten() - FWHM_L)).argmin()
argminL_0 = int(np.floor(argminL/FWHM.data.shape[1]))
argminL_1 = (FWHM.data.shape[1] * (argminL/FWHM.data.shape[1] - argminL_0))
PSF_ex1 = Spectrum.EELSSpectrum(SI.data[48, 47, :]/np.max(SI.data[48, 47, :])*np.max(PSF.intensity))
SP1.update_spectrum(PSF_ex1, 'close experiment')
diff_L = Spectrum.EELSSpectrum(PSF_ex1.intensity-PSF_Lfat.intensity[int(minI):int(maxI)]
, SpectrumRange=PSF_ex1.SpectrumRange)
SP1.update_spectrum(diff_L, 'Difference Lorentz')
SP1.add_legend()
LorentzFits = {}
LorentzModel = {}
LorentzConvolved = {}
PSF_Lorentz = {}
PSF_Lorentz_FWHM = collections.OrderedDict()
SP2 = SpectrumPlotter.SpectrumManager(PSF, cmap=plt.get_cmap('nipy_spectral'))
SP3 = SpectrumPlotter.SpectrumManager(PSF, cmap=plt.get_cmap('nipy_spectral'))
for ff in np.linspace(0.002, 0.091, 10):
LorentzFits[ff] = models.Lorentz1D(amplitude=np.max(PSF.intensity)/2., x_0=0, fwhm=ff)
LorentzModel[ff] = Spectrum.EELSSpectrum(LorentzFits[ff](Lrange), SpectrumRange=Lrange)
SP2.update_spectrum(LorentzModel[ff], str(ff))
LorentzConvolved[ff] = np.convolve(LorentzFits[ff](Lrange)[::-1], PSF.intensity, 'same')
LorentzConvolved[ff] = LorentzConvolved[ff]/np.max(LorentzConvolved[ff]) * np.max(PSF.intensity)
PSF_Lorentz[ff] = Spectrum.EELSSpectrum(LorentzConvolved[ff], SpectrumRange=Lrange)
PSF_Lorentz_FWHM[ff] = PSF_Lorentz[ff].FindFW(0.5)
SP3.update_spectrum(PSF_Lorentz[ff], str(ff))
for ff in np.linspace(0.001, 0.09, 10):
LorentzFits[ff] = models.Lorentz1D(amplitude=np.max(PSF.intensity), x_0=0, fwhm=ff)
LorentzModel[ff] = Spectrum.EELSSpectrum(LorentzFits[ff](Lrange), SpectrumRange=Lrange)
SP2.update_spectrum(LorentzModel[ff], str(ff))
LorentzConvolved[ff] = np.convolve(LorentzFits[ff](Lrange)[::-1], PSF.intensity, 'same')
LorentzConvolved[ff] = LorentzConvolved[ff]/np.max(LorentzConvolved[ff]) * np.max(PSF.intensity)
PSF_Lorentz[ff] = Spectrum.EELSSpectrum(LorentzConvolved[ff], SpectrumRange=Lrange)
PSF_Lorentz_FWHM[ff] = PSF_Lorentz[ff].FindFW(0.5)
SP3.update_spectrum(PSF_Lorentz[ff], (str(ff)+'full'))
SP2.add_legend()
SP3.add_legend()
plt.figure()
plt.plot(PSF_Lorentz_FWHM.values(), PSF_Lorentz_FWHM.keys(), 'ro')
regr = linear_model.LinearRegression()
y = np.array(PSF_Lorentz_FWHM.keys())
x = np.array(PSF_Lorentz_FWHM.values()).reshape((20,1))
regr.fit(x, y)
valuefit = regr.predict(x)
plt.plot(x, valuefit, 'b-')
PSF_SI = SpectrumImage.EELSSpectrumImage(np.ones((SI.size)) * PSF.intensity, ZLP=True, dispersion=SI.dispersion)
PSF_SI_m = SpectrumImage.EELSSpectrumImage(np.ones((SI.size)) * PSF.intensity, ZLP=True, dispersion=SI.dispersion)
L_FWHM_line = regr.predict(FWHM_line.reshape(len(FWHM_line), 1)).reshape(FWHM.size)
L_FWHM_line[L_FWHM_line < 0] = 0
xx_yy = np.array(np.meshgrid(range(FWHM.size[0]), range(FWHM.size[1])))
x = xx_yy[0].flatten()
y = xx_yy[1].flatten()
for xx, yy in zip(x,y):
if L_FWHM_line[xx, yy] > 0:
LorentzFit = models.Lorentz1D(amplitude=np.max(PSF.intensity), x_0=0, fwhm=L_FWHM_line[xx, yy])
PSF_Lorentz = np.convolve(LorentzFit(Lrange)[::-1][int(minI):int(maxI)], PSF.intensity, 'same')
MoffatFit = models.Moffat1D(amplitude=np.max(PSF.intensity), x_0=0, gamma=L_FWHM_line[xx, yy]/2., alpha=0.95)
PSF_Moffat = np.convolve(MoffatFit(Lrange)[::-1][int(minI):int(maxI)], PSF.intensity, 'same')
else:
PSF_Lorentz = PSF.intensity
PSF_Moffat = PSF.intensity
PSF_SI.data[xx, yy, :] = PSF_Lorentz/np.max(PSF_Lorentz)
    PSF_SI_m.data[xx, yy, :] = PSF_Moffat/np.max(PSF_Moffat)
Spectest1 = Spectrum.EELSSpectrum(PSF_SI.data[46, 38, :], ZLP=True, dispersion=0.005)
Spectest2 = Spectrum.EELSSpectrum(SI.data[46, 38, :]/SI.data[38, 29, SI.ZLP], ZLP=True, dispersion=0.005)
SP4 = SpectrumPlotter.SpectrumManager(Spectest1, cmap=plt.get_cmap('nipy_spectral'))
SP4.update_spectrum(Spectest2, 'Raw data')
SP4.add_legend()
PSF_plot = SpectrumImagePlotter.SpectrumImagePlotter(PSF_SI)
RLiterations = 15
SI2 = SI.RLDeconvolution(RLiterations, PSF, threads=8)
PSF_SI2 = SI.RLDeconvolution_Adaptive(RLiterations, PSF_SI, threads=8)
#PSF_SI3 = SI.RLDeconvolution_Adaptive(RLiterations, PSF_SI_m, threads=8)
PSF_RL1 = SpectrumImagePlotter.SpectrumImagePlotter(SI2)
PSF_RL2 = SpectrumImagePlotter.SpectrumImagePlotter(PSF_SI2)
#PSF_RL3 = SpectrumImagePlotter.SpectrumImagePlotter(PSF_SI3)
PSF_RL2.ShowPlot()
|
bsd-3-clause
|
isaziconsulting/xray-desensitizer
|
src/clean_xrays.py
|
1
|
4307
|
import sys
import os
import warnings
import hashlib
import string
import gflags
from gflags import FLAGS
import progressbar
import pandas as pd
import Levenshtein as lv
from skimage import io
import crop_xray_vector as cxv
import OCR as ocr
# ignore warnings because they ruin the progress bar
warnings.filterwarnings("ignore", message="low contrast image")
# define input flags
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_string('inpath', 'data', 'path to input data')
gflags.DEFINE_string('outpath', 'processed_data', 'path to output data')
gflags.DEFINE_enum('clean', 'inpaint', ['inpaint', 'mask'], 'type of image processing to perform')
def find_files(path):
'''Recursively walks input directory and find all files within subdirectories.
Arguments:
path: Path to the input directory.
Returns:
all_files: All files as a path with corresponding subdirectories.
'''
all_files = []
if os.path.isdir(path):
subdirs = os.listdir(path)
for subdir in subdirs:
# recursively check next available directory until a file is found
all_files += find_files(path+'/'+subdir)
return all_files
else:
return [path]
def get_popular_string(all_strings):
'''Finds most likely OCR output string based on string median calculations.
Arguments:
all_strings: All different OCR strings corresponding to the same x-ray image.
Returns:
Most Likely string.
'''
return lv.median(list(map(lambda x: x.upper(), all_strings)))
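# A minimal sketch of the consensus step: Levenshtein.median picks a median
# string, so isolated per-copy OCR errors tend to cancel out.
def _demo_popular_string():
    noisy_reads = ['JOHN DOE', 'J0HN DOE', 'JOHN DOF']    # hypothetical OCR outputs
    return get_popular_string(noisy_reads)    # expected to recover 'JOHN DOE'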
def get_patient_info(diff_imgs):
'''Gets a unique hash of a patient name, the date that the x-ray was taken
and the gender of the patient.
Arguments:
diff_imgs: Array of multiple copies of the same x-ray, each copy has a different type
of image processing applied to it to affect the text in the image differently.
Returns:
patient_id: Unique hash of the patient name.
xray_datetime: Date and time that the x-ray was taken.
gender: Sex of the patient [M, F].
'''
patient_names = []
patient_births = []
xray_datetimes = []
genders = []
for img in diff_imgs:
# run OCR
patient_name, patient_birth, xray_datetime, gender = ocr.get_text_from_tesseract(img)
patient_names += [patient_name]
patient_births += [patient_birth]
xray_datetimes += [xray_datetime]
genders += [gender]
# get the most likely string from all images
patient_name = get_popular_string(patient_names)
patient_birth = get_popular_string(patient_births)
translator = str.maketrans('', '', string.punctuation)
patient_birth = str(patient_birth).translate(translator)
xray_datetime = get_popular_string(xray_datetimes)
gender = get_popular_string(genders)
# encrypt patient name
to_encrypt = patient_name+patient_birth
to_encrypt = to_encrypt.encode('utf-8')
patient_id = hashlib.sha256(to_encrypt).hexdigest()[:20]
return patient_id, xray_datetime, gender
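# A minimal sketch of the identifier scheme used above: the patient ID is the
# first 20 hex characters of a SHA-256 digest of name + birth date.
def _demo_patient_id(name='JOHN DOE', birth='19700101'):    # hypothetical values
    return hashlib.sha256((name + birth).encode('utf-8')).hexdigest()[:20]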
def main(argv):
try:
# parse flags
argv = FLAGS(argv)
except gflags.FlagsError as e:
        print('%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS))
sys.exit(1)
if FLAGS.debug:
print('non-flag arguments:', argv)
# retrieve all the files to be processed
all_files = find_files(FLAGS.inpath)
# define pandas dataframe
df = pd.DataFrame(columns=('patientID', 'xrayDateTime', 'gender', 'path'))
pbar = progressbar.ProgressBar(maxval=len(all_files))
pbar.start()
for i, name in enumerate(all_files):
# run image processing
new_image, all_images = cxv.run_system(name, FLAGS.clean)
write_name = name.replace(FLAGS.inpath, FLAGS.outpath)
split_name = write_name.rsplit('/', 1)[0]
if not os.path.exists(split_name):
os.makedirs(split_name)
io.imsave(write_name, new_image)
# run OCR for hash
patient_id, xray_datetime, gender = get_patient_info(all_images)
df.loc[i] = [patient_id, xray_datetime, gender, write_name]
# update progress bar
pbar.update(i)
pbar.finish()
df.to_csv(FLAGS.outpath+'/labelled_info.csv')
if __name__ == '__main__':
main(sys.argv)
|
mit
|
ViralLeadership/numpy
|
numpy/core/tests/test_multiarray.py
|
1
|
224786
|
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec, SkipTest
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the sytem
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray works
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
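# [Illustrative sketch, not part of the original test] The casting rules
# form a hierarchy 'no' < 'equiv' < 'safe' < 'same_kind' < 'unsafe':
# anything allowed under a stricter rule is also allowed under a looser one,
# e.g. a pure byte-order change is 'equiv' and therefore also 'unsafe'.
assert_(np.can_cast('<i4', '>i4', casting='equiv'))
assert_(np.can_cast('<i4', '>i4', casting='unsafe'))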
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
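# [Illustrative sketch, not part of the original test] np.round uses
# round-half-to-even ("banker's rounding"), which is why 1.5 rounds up to 2
# above while 0.5 and 2.5 both round to the nearest even value:
check_round(np.array([0.5, 2.5]), [0, 2])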
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "record array sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using None is known to fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The searchsorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
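# [Illustrative note, not part of the original test] np.sort places nan
# at the end, so the hand-sorted array above matches the sort order that
# searchsorted relies on:
assert_equal(np.sort(np.array([np.nan, 1, 0])), a)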
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
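# [Illustrative sketch, not part of the original test] side='left' and
# side='right' only differ for values already present in the array:
demo = np.array([1, 2, 2, 3])
assert_equal(demo.searchsorted(2, side='left'), 1)
assert_equal(demo.searchsorted(2, side='right'), 3)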
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
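# [Illustrative sketch, not part of the original test] 'sorter' is the
# permutation that sorts the array, so searchsorted can be used on
# unsorted data without first copying it into sorted order:
demo = np.array([3, 1, 2])
assert_equal(demo.searchsorted(2, sorter=demo.argsort()), 1)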
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median-of-3 killer input: O(n^2) for a pure median-of-3 pivot quickselect;
# exercises the median-of-medians-of-5 code used to keep the runtime O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
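# [Illustrative sketch, not part of the original test] np.partition only
# guarantees that the kth element lands in its sorted position, with
# smaller-or-equal values before it and larger-or-equal values after it:
demo = np.array([7, 1, 5, 3, 9])
part = np.partition(demo, 2)
assert_equal(part[2], np.sort(demo)[2])
assert_((part[:2] <= part[2]).all() and (part[3:] >= part[2]).all())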
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
for row in d1: np.random.shuffle(row)  # shuffle each row; a bare map() is lazy on Python 3 and would do nothing
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[i + 1:,:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
for row in d1: np.random.shuffle(row)  # shuffle each row; a bare map() is lazy on Python 3 and would do nothing
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
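# [Illustrative note, not part of the original test] ndarray.put indexes
# the flattened array, which is why the same index list fills both the
# 1-d and the 2-d targets above:
demo = np.zeros((2, 3), dtype=int)
demo.put([1, 3, 5], [1, 3, 5])
assert_equal(demo.ravel()[[1, 3, 5]], [1, 3, 5])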
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
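# [Illustrative sketch, not part of the original test] order='K' follows
# memory layout but, as noted above, does not reverse axes with negative
# strides, so for this reversed view it matches the 'C' result:
demo = np.arange(6).reshape(2, 3)[:, ::-1]
assert_equal(demo.ravel('K'), [2, 1, 0, 5, 4, 3])
assert_equal(demo.ravel('K'), demo.ravel('C'))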
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the returned original must not have been changed by an elided in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the Python stack (to check
# that we are called directly from Python) is flawed, as the object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the original list element must not have been changed by an elided in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that inherits __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
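# On Python 3 the version 0/1 pickles below (written by Python 2) need
# encoding='latin1' to unpickle their byte strings correctly.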
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
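# Each entry pairs an input sequence with the expected result of np.argmax.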
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
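# Each entry pairs an input sequence with the expected result of np.argmin.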
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
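# Clip random data for every dtype in `type_group`, in both native and
# swapped byte order, optionally in place, and verify the clipped range.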
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
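# np.lexsort sorts by the last key first: a is the primary key, b breaks ties.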
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str() gives reduced precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
f = io.open(self.filename, 'rb', buffering=0)
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
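# Parse `s` with np.fromstring and, via a temporary file, with np.fromfile;
# both results must match `value`.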
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
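# a and b are made non-writeable (b is also non-contiguous);
# a0 and b0 are their writeable counterparts.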
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
# Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
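# Module-level wrappers around the ndarray methods so TestStats can loop
# over them uniformly.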
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
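# mean/std/var should preserve the subclass attribute set in __array_finalize__.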
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
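# dout[k] is the expected result shape for the k-th (dim1, dim2) pair
# produced by itertools.product(dims, dims).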
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
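# Over-allocate a uint8 buffer and slice it so the returned array's data
# pointer is aligned to `align` bytes.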
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(B, C), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
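# Padding behaviours exercised below: 'zero' and 'one' pad with 0s/1s,
# 'constant' pads with a caller-supplied fill value, 'circular' wraps around
# the array, and 'mirror' reflects it at the edges.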
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
        # Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
pstjohn/decay-methods
|
dose_dependence/fit_experimental_data.py
|
1
|
12476
|
# This file processes and fits decaying sinusoids to the raw
# bioluminescence data of the experimental application of small
# molecules KL001 and Longdaysin in increasing concentration. Data is
# imported from the data directory. Creates several figures to display
# the data in various formats. Saves the resulting fits in the
# experimental_fits directory.
import pandas as pd
import numpy as np
from scipy import stats
genes = ['bmal1', 'per2']
drugs = ['KL001', 'longdaysin']
frames = []
frame_list = []
# Loop over each of the four files, creating a dataframe. The
# highest concentrations of each drug lead to increasingly poor rhythms,
# and therefore these concentrations are ignored in parts of the fitting
# and analysis
for drug in drugs:
for gene in genes:
# Skipping first two columns (71uM set)
frames += [pd.DataFrame.from_csv('experimental_data/DataTH_' +
gene + '_' + drug +
'.csv').iloc[:,2:].T]
frame_list += [(drug, gene)]
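# The column labels of each frame are the measurement time points (hours, per
# the axis labels used below); their mean spacing gives the sampling interval.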
sampling_period = np.diff(frames[0].columns.values).mean()
from methods.sinusoid_estimation import fit_data
def fit(row):
return fit_data(row.values, sampling_period, outliers=False, full=False)
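# fit_data (imported from the local methods package above) returns the fitted
# sinusoid parameters used below (amplitude, period, phase, decay, R2); with
# full=True it also keeps the raw and detrended traces (x_raw, x_detrend).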
frame_fits = []
# Loop over the frames, applying the fit function and saving the
# results
for frame in frames:
# Drop the first three datapoints from the start of each trajectory
fitted = frame.iloc[:,3:].apply(fit, axis=1)
fitted = fitted.reset_index()
for i, item in enumerate(fitted['index']):
        # Correct a naming inconsistency in the column labels (pandas appends
        # '.1' to duplicate names) so that both replicate columns map to the
        # same numeric concentration index.
if item[-2:] == '.1':
fitted['index'][i] = float(item[:-4])
else: fitted['index'][i] = float(item[:-2])
frame_fits += [fitted]
# save output fits
for iden, fitted in zip(frame_list, frame_fits):
drug = iden[0]
gene = iden[1]
fitted.to_csv('experimental_fits/fits_' + drug + '_' + gene +
'.csv')
# The 24uM experiment of KL001 in Per2 cells had very fast damping and
# poor sinusoidal fits, and was therefore removed from further analysis
frame_fits[1] = frame_fits[1].iloc[2:]
# First we refit each frame with full=True to keep the raw and detrended traces
def fit_full(row):
return fit_data(row.values, sampling_period, outliers=False,
full=True)
frame_full_fits = []
for frame in frames:
fitted = frame.iloc[:,3:].apply(fit_full, axis=1)
fitted = fitted.reset_index()
for i, item in enumerate(fitted['index']):
if item[-2:] == '.1':
fitted['index'][i] = float(item[:-4])
else: fitted['index'][i] = float(item[:-2])
frame_full_fits += [fitted]
# Import some plotting shortcuts and formatting functions
from CommonFiles.PlotOptions import (PlotOptions, layout_pad,
lighten_color, plot_gray_zero,
solarized)
PlotOptions(uselatex=True)
import matplotlib.pylab as plt
from CommonFiles.PlotOptions import solarized
sol_colors = ['yellow', 'orange', 'violet', 'blue']
colors = [solarized[c] for c in sol_colors]
iden_list = [r'KL001, {\itshape Bmal1-dLuc}',
r'KL001, {\itshape Per2-dLuc}',
r'Longdaysin, {\itshape Bmal1-dLuc}',
r'Longdaysin, {\itshape Per2-dLuc}']
# Just a normal plot of the bioluminescence data
# fig, axmatrix = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222, sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(223, sharex=ax1)
ax4 = fig.add_subplot(224, sharex=ax1, sharey=ax3)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
axlist = [ax1, ax3, ax2, ax4]
for frame, iden, color, ax in zip(frame_full_fits, iden_list, colors,
axlist):
frame['index'] = frame['index'].astype(str)
grouped = frame.set_index(['index', [0,1]*9])
amounts = np.array(grouped.index.levels[0].tolist())
inds = np.array(amounts, dtype=float).argsort()
for amount, lighten in zip(amounts[inds][:-2],
np.linspace(0, 0.8, len(amounts))):
for row in grouped.loc[amount].iterrows():
row = row[1]
y = row.x_raw
x = np.arange(len(y))*sampling_period
ax.plot(x, y, color=lighten_color(color, lighten))
ax.text(0.92, 0.92, iden, verticalalignment='top',
horizontalalignment='right', transform=ax.transAxes)
# ax.set_ylim([-1, 1])
import matplotlib.lines as mlines
lines = []
labels = []
for amount, lighten in zip(amounts[inds][:-2],
np.linspace(0, 0.8, len(amounts))):
lines += [mlines.Line2D([], [], color=lighten_color('0.47', lighten))]
    labels += [amount + r'$\mu$M']
ax1.legend(lines, labels, ncol=2)
ax3.set_ylim([0, 800])
ax.set_xlim([0, len(y)*sampling_period])
ax3.set_xlabel('Time (hrs)')
ax4.set_xlabel('Time (hrs)')
ax1.set_ylabel('Bioluminescence')
ax3.set_ylabel('Bioluminescence')
fig.tight_layout(**layout_pad)
fig = plt.figure(figsize=(2.37, 1.63))
ax = fig.add_subplot(111)
for frame, iden, color in zip(frame_fits, iden_list, colors):
grouped = frame.groupby('index')
means = grouped.aggregate(lambda x: np.mean(x, axis=0))
error = grouped.aggregate(lambda x: stats.sem(x, axis=0))
out = ax.plot(range(len(means.R2)), means.R2,
marker='o', color=color,
markeredgecolor='none', linestyle='--',
label=iden, zorder=2)
out = ax.errorbar(range(len(means.R2)), means.R2, yerr=error.R2,
ecolor='#262626', elinewidth=0.5, capthick=0.5,
zorder=1, linestyle='none')
ax.legend(ncol=1, loc='lower left', numpoints=2, prop={'size':6})
ax.set_xlabel(r'Drug Concentration ($\mu$M)')
ax.set_ylabel(r'$R^2$')
ax.set_xticks(range(len(means.decay)))
ax.set_xlim([-0.5, len(means.decay) - 0.5])
ax.set_xticklabels([str(i) for i in means.index])
fig.tight_layout(**layout_pad)
# Main figure!
import matplotlib
mainfig = plt.figure(figsize=(7.25, 3))
gs_left = matplotlib.gridspec.GridSpec(2,2)
gs_right = matplotlib.gridspec.GridSpec(2,1)
gs_left.update(right=0.5)
gs_right.update(left=0.6)
ax_all = mainfig.add_subplot(gs_left[:,:])
ax_all.spines['top'].set_color('none')
ax_all.spines['bottom'].set_color('none')
ax_all.spines['left'].set_color('none')
ax_all.spines['right'].set_color('none')
ax_all.tick_params(labelcolor='w', top='off', bottom='off', left='off',
right='off')
ax1 = mainfig.add_subplot(gs_left[0,0])
ax2 = mainfig.add_subplot(gs_left[0,1], sharex=ax1, sharey=ax1)
ax3 = mainfig.add_subplot(gs_left[1,0], sharex=ax1, sharey=ax1)
ax4 = mainfig.add_subplot(gs_left[1,1], sharex=ax1, sharey=ax1)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
axmatrix_ts = np.array([[ax1, ax2], [ax3, ax4]])
iden_list = [r'KL001, {\itshape Bmal1-dLuc}',
r'KL001, {\itshape Per2-dLuc}',
r'Longdaysin, {\itshape Bmal1-dLuc}',
r'Longdaysin, {\itshape Per2-dLuc}']
# Here we plot the bioluminescence data, but normalize the data using
# the fitted period, phase, and amplitude information. This process
# leaves the decay rate in the data untouched, which highlights the
# differences between the KL001 and Longdaysin trajectories
#
# Next we construct the plot
# fig, axmatrix = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
# Loop over each experimental condition, giving a different color
for frame, iden, color, ax in zip(frame_full_fits, iden_list, colors,
axmatrix_ts.flatten()):
frame['index'] = frame['index'].astype(str)
grouped = frame.set_index(['index', [0,1]*9])
amounts = np.array(grouped.index.levels[0].tolist())
inds = np.array(amounts, dtype=float).argsort()
# Loop over the experimental conditions, giving different plotting
# options
for amount, lighten in zip(amounts[inds][:-2],
np.linspace(0, 0.8, len(amounts))):
# Loop over each trajectory
for row in grouped.loc[amount].iterrows():
row = row[1]
# Get the baseline-subtracted data
y_adj = row.x_detrend
# Get the fitted parameters
amplitude = row.amplitude
period = row.period
phase = row.phase
# Get a time-series for x-values
x = np.arange(len(y_adj))*sampling_period
            if np.all(np.abs(y_adj/amplitude) < 1.):
x = x/period + phase/(2*np.pi)
ax.plot(x - x[0], y_adj/amplitude,
color=lighten_color(color, lighten))
# ax.set_title(iden)
ax.text(0.95, 0.05, iden, verticalalignment='bottom',
horizontalalignment='right', transform=ax.transAxes,
size='small')
ax.set_ylim([-1, 1])
ax.set_xlim([0, 5])
plot_gray_zero(ax, zorder=0)
ax_all.set_xlabel('Phase (days)')
ax_all.set_ylabel('Bioluminescence')
import matplotlib.lines as mlines
lines = []
labels = []
for amount, lighten in zip(amounts[inds][:-2],
np.linspace(0, 0.8, len(amounts))):
lines += [mlines.Line2D([], [], color=lighten_color('0.47', lighten))]
    labels += [amount + r'$\mu$M']
axmatrix_ts[0,0].legend(lines, labels, ncol=2)
# Here we plot the dose dependent increase in decay rate for each of the
# 4 systems (2 drugs, 2 reporters)
# colors = ['#FF4844', '#FF9C44', '#2C97A1', '#36CC40']
# fig, axmatrix = plt.subplots(nrows=2, sharex=True, figsize=(2.37, 2.37))
ax1 = mainfig.add_subplot(gs_right[0,0])
ax2 = mainfig.add_subplot(gs_right[1,0], sharex=ax1)
plt.setp(ax2.get_xticklabels(), visible=False)
axmatrix_kd = np.array([ax1, ax2])
for frame, iden, color in zip(frame_fits, iden_list, colors):
grouped = frame.groupby('index')
means = grouped.aggregate(lambda x: np.mean(x, axis=0))
error = grouped.aggregate(lambda x: stats.sem(x, axis=0))
out = ax1.plot(range(len(means.decay)), means.decay,
marker='o', color=color,
markeredgecolor='none', linestyle='--',
label=iden, zorder=2)
out = ax1.errorbar(range(len(means.decay)), means.decay,
yerr=error.decay, ecolor='#262626',
elinewidth=0.5, capthick=0.5, zorder=1,
linestyle='none')
# ax1.legend(ncol=1, loc='upper left', numpoints=2, prop={'size':6})
# ax1.set_xlabel(r'Drug Concentration ($\mu$M)')
ax1.set_ylabel(r'Damping Rate $\left(\nicefrac{1}{\mathrm{hrs}}\right)$')
# ax1.set_xticks(range(len(means.D)))
# ax1.set_xlim([-0.5, len(means.D) - 0.5])
# ax1.set_xticklabels([str(i) for i in means.index])
# Same plot, but this time dose dependent increase in Period
for frame, iden, color in zip(frame_fits, iden_list, colors):
grouped = frame.groupby('index')
means = grouped.aggregate(lambda x: np.mean(x, axis=0))
error = grouped.aggregate(lambda x: stats.sem(x, axis=0))
out = ax2.plot(range(len(means.period)), means.period,
marker='o', color=color,
markeredgecolor='none', linestyle='--',
label=iden, zorder=2)
out = ax2.errorbar(range(len(means.period)), means.period,
yerr=error.period, ecolor='#262626',
elinewidth=0.5, capthick=0.5, zorder=1,
linestyle='none')
ax2.legend(ncol=1, loc='upper left', numpoints=2, prop={'size':6})
ax2.set_xlabel(r'Drug Concentration ($\mu$M)')
ax2.set_ylabel(r'Period (hrs)')
ax2.set_xticks(range(len(means.decay)))
ax2.set_xlim([-0.5, len(means.decay) - 0.5])
ax2.set_xticklabels([str(i) for i in means.index])
# Interestingly, the natural decay rate for Per2-dLuc is lower than
# Bmal1-dLuc, which likely reflects an underlying difference in the
# clock dynamics of each reporter system.
# mainfig.tight_layout(**layout_pad)
gs_left.update(wspace=.075, left=0.075)
gs_right.update(right=.99)
plt.show()
|
gpl-3.0
|
WesleyyC/RestaurantRevenuePrediction
|
Ari/preprocessing/RecursiveFeatureElimination.py
|
2
|
1249
|
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
df_train = pd.read_csv("train.csv")
feats = df_train.drop("revenue", axis=1)
X = feats.values #features
y = df_train["revenue"].values #target
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
t = StandardScaler()
X = t.fit_transform(X)
y = t.fit_transform(y)
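# Note that the StandardScaler is refit on the revenue target as well, so
# RFECV is fed standardized y values.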
count = 0
for elem in y:
    print(elem)
count += 1
if count > 10:
break
count = 0
for elem in X:
    print(elem)
count += 1
if count > 10:
break
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
mit
|
CCI-Tools/ect-core
|
test/core/test_types.py
|
1
|
29024
|
from collections import namedtuple
from datetime import datetime, date
from io import StringIO
from typing import Union, Tuple
from unittest import TestCase
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely.wkt
import xarray as xr
from shapely.geometry import Point, Polygon
from cate.core.op import op_input, OpRegistry
from cate.core.types import Like, VarNamesLike, VarName, PointLike, PolygonLike, TimeRangeLike, GeometryLike, \
DictLike, TimeLike, Arbitrary, Literal, DatasetLike, DataFrameLike, FileLike, GeoDataFrame, HTMLLike, HTML, \
ValidationError, DimName, DimNamesLike
from cate.util.misc import object_to_qualified_name, OrderedDict
# 'ExamplePoint' is an example type which may come from Cate API or other required API.
ExamplePoint = namedtuple('ExamplePoint', ['x', 'y'])
# 'ExampleType' represents a type to be used for values that should actually be an 'ExamplePoint' but
# may also be given as a `str` such as "2.1,4.3", or as a tuple of two floats (2.1, 4.3).
# The "typing type" for this is given by ExampleType.TYPE.
class ExampleType(Like[ExamplePoint]):
TYPE = Union[ExamplePoint, Tuple[float, float], str]
@classmethod
def convert(cls, value, default=None) -> ExamplePoint:
try:
if isinstance(value, ExamplePoint):
return value
if isinstance(value, str):
pair = value.split(',')
return ExamplePoint(float(pair[0]), float(pair[1]))
return ExamplePoint(value[0], value[1])
except Exception:
raise ValidationError('Cannot convert value <%s> to %s.' % (repr(value), cls.name()))
@classmethod
def format(cls, value: ExamplePoint) -> str:
return "%s, %s" % (value.x, value.y)
# TestType = NewType('TestType', _TestType)
# 'scale_point' is an example operation that makes use of the ExampleType type for its 'point_like' argument
_OP_REGISTRY = OpRegistry()
@op_input("point_like", data_type=ExampleType, registry=_OP_REGISTRY)
def scale_point(point_like: ExampleType.TYPE, factor: float) -> ExamplePoint:
point = ExampleType.convert(point_like)
return ExamplePoint(factor * point.x, factor * point.y)
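# For example, scale_point("2.4, 4.8", 0.5) yields ExamplePoint(1.2, 2.4),
# as exercised by ExampleTypeTest below.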
class ExampleTypeTest(TestCase):
def test_use(self):
self.assertEqual(scale_point("2.4, 4.8", 0.5), ExamplePoint(1.2, 2.4))
self.assertEqual(scale_point((2.4, 4.8), 0.5), ExamplePoint(1.2, 2.4))
self.assertEqual(scale_point(ExamplePoint(2.4, 4.8), 0.5), ExamplePoint(1.2, 2.4))
def test_abuse(self):
with self.assertRaises(ValidationError) as cm:
scale_point("A, 4.8", 0.5)
self.assertEqual(str(cm.exception),
"Input 'point_like' for operation 'test.core.test_types.scale_point': "
"Cannot convert value <'A, 4.8'> to ExampleType.")
with self.assertRaises(ValidationError) as cm:
scale_point(25.1, 0.5)
self.assertEqual(str(cm.exception),
"Input 'point_like' for operation 'test.core.test_types.scale_point': "
"Cannot convert value <25.1> to ExampleType.")
def test_registered_op(self):
registered_op = _OP_REGISTRY.get_op(object_to_qualified_name(scale_point))
point = registered_op(point_like="2.4, 4.8", factor=0.5)
self.assertEqual(point, ExamplePoint(1.2, 2.4))
def test_name(self):
self.assertEqual(ExampleType.name(), "ExampleType")
def test_accepts(self):
self.assertTrue(ExampleType.accepts("2.4, 4.8"))
self.assertTrue(ExampleType.accepts((2.4, 4.8)))
self.assertTrue(ExampleType.accepts([2.4, 4.8]))
self.assertTrue(ExampleType.accepts(ExamplePoint(2.4, 4.8)))
self.assertFalse(ExampleType.accepts("A, 4.8"))
self.assertFalse(ExampleType.accepts(25.1))
def test_format(self):
self.assertEqual(ExampleType.format(ExamplePoint(2.4, 4.8)), "2.4, 4.8")
class HTMLLikeTest(TestCase):
"""
Test the HTMLLike type
"""
def test_accepts(self):
self.assertTrue(HTMLLike.accepts('abc'))
self.assertTrue(HTMLLike.accepts(HTML('abc')))
self.assertTrue(HTMLLike.accepts(42))
def test_convert(self):
actual = HTMLLike.convert('abc')
self.assertIsInstance(actual, HTML)
self.assertEqual(actual, 'abc')
def test_format(self):
actual = VarNamesLike.format(HTML('abc'))
self.assertIsInstance(actual, str)
self.assertEqual(actual, 'abc')
class VarNamesLikeTest(TestCase):
"""
Test the VarNamesLike type
"""
def test_accepts(self):
self.assertTrue(VarNamesLike.accepts('aa'))
self.assertTrue(VarNamesLike.accepts('aa,bb,cc'))
self.assertTrue(VarNamesLike.accepts(['aa', 'bb', 'cc']))
self.assertFalse(VarNamesLike.accepts(1.0))
self.assertFalse(VarNamesLike.accepts([1, 2, 4]))
self.assertFalse(VarNamesLike.accepts(['aa', 2, 'bb']))
def test_convert(self):
expected = ['aa', 'b*', 'cc']
actual = VarNamesLike.convert('aa,b*,cc')
self.assertEqual(actual, expected)
with self.assertRaises(ValidationError) as err:
VarNamesLike.convert(['aa', 1, 'bb'])
self.assertEqual(str(err.exception), 'List of variables names expected.')
self.assertEqual(None, VarNamesLike.convert(None))
def test_format(self):
self.assertEqual(VarNamesLike.format(['aa', 'bb', 'cc']), "aa, bb, cc")
self.assertEqual(VarNamesLike.format(['aa']), "aa")
self.assertEqual(VarNamesLike.format([]), "")
self.assertEqual(VarNamesLike.format(None), "")
class VarNameTest(TestCase):
"""
Test the VarName type
"""
def test_accepts(self):
self.assertTrue(VarName.accepts('aa'))
self.assertFalse(VarName.accepts(['aa', 'bb', 'cc']))
self.assertFalse(VarName.accepts(1.0))
def test_convert(self):
expected = 'aa'
actual = VarName.convert('aa')
self.assertEqual(actual, expected)
with self.assertRaises(ValidationError) as err:
VarName.convert(['aa', 'bb', 'cc'])
self.assertEqual(str(err.exception), 'Variable name expected.')
self.assertEqual(None, VarName.convert(None))
def test_format(self):
self.assertEqual('aa', VarName.format('aa'))
class DimNamesLikeTest(TestCase):
"""
Test the DimNamesLike type
"""
def test_accepts(self):
self.assertTrue(DimNamesLike.accepts('aa'))
self.assertTrue(DimNamesLike.accepts('aa,bb,cc'))
self.assertTrue(DimNamesLike.accepts(['aa', 'bb', 'cc']))
self.assertFalse(DimNamesLike.accepts(1.0))
self.assertFalse(DimNamesLike.accepts([1, 2, 4]))
self.assertFalse(DimNamesLike.accepts(['aa', 2, 'bb']))
def test_convert(self):
expected = ['aa', 'b*', 'cc']
actual = DimNamesLike.convert('aa,b*,cc')
self.assertEqual(actual, expected)
with self.assertRaises(ValidationError) as err:
DimNamesLike.convert(['aa', 1, 'bb'])
self.assertEqual(str(err.exception), 'List of dimension names expected.')
self.assertEqual(None, DimNamesLike.convert(None))
def test_format(self):
self.assertEqual(DimNamesLike.format(['aa', 'bb', 'cc']), "aa, bb, cc")
self.assertEqual(DimNamesLike.format(['aa']), "aa")
self.assertEqual(DimNamesLike.format([]), "")
self.assertEqual(DimNamesLike.format(None), "")
class DimNameTest(TestCase):
"""
Test the DimName type
"""
def test_accepts(self):
self.assertTrue(DimName.accepts('aa'))
self.assertFalse(DimName.accepts(['aa', 'bb', 'cc']))
self.assertFalse(DimName.accepts(1.0))
def test_convert(self):
expected = 'aa'
actual = DimName.convert('aa')
self.assertEqual(actual, expected)
with self.assertRaises(ValidationError) as err:
DimName.convert(['aa', 'bb', 'cc'])
self.assertEqual(str(err.exception), 'Dimension name expected.')
self.assertEqual(None, DimName.convert(None))
def test_format(self):
self.assertEqual('aa', DimName.format('aa'))
class FileLikeTest(TestCase):
"""
Test the FileLike type
"""
def test_accepts(self):
self.assertTrue(FileLike.accepts(None))
self.assertTrue(FileLike.accepts(''))
self.assertTrue(FileLike.accepts('a/b/c'))
self.assertTrue(FileLike.accepts(StringIO()))
self.assertFalse(FileLike.accepts(2))
self.assertFalse(FileLike.accepts(True))
def test_convert(self):
self.assertEqual(FileLike.convert(None), None)
self.assertEqual(FileLike.convert(''), None)
self.assertEqual(FileLike.convert('a/b/c'), 'a/b/c')
io = StringIO()
self.assertEqual(FileLike.convert(io), io)
def test_format(self):
self.assertEqual(FileLike.format(None), '')
self.assertEqual(FileLike.format('a/b/c'), 'a/b/c')
io = StringIO()
self.assertEqual(FileLike.format(io), '')
class DictLikeTest(TestCase):
"""
Test the DictLike type
"""
def test_accepts(self):
self.assertTrue(DictLike.accepts(None))
self.assertTrue(DictLike.accepts(''))
self.assertTrue(DictLike.accepts(' '))
self.assertTrue(DictLike.accepts('a=6, b=5.3, c=True, d="Hello"'))
self.assertFalse(DictLike.accepts('{a=True}'))
self.assertFalse(DictLike.accepts('a=true'))
self.assertFalse(DictLike.accepts('{a, b}'))
self.assertFalse(DictLike.accepts(['aa', 'bb', 'cc']))
self.assertFalse(DictLike.accepts(1.0))
def test_convert(self):
self.assertEqual(DictLike.convert(None), None)
self.assertEqual(DictLike.convert(''), None)
self.assertEqual(DictLike.convert(' '), None)
self.assertEqual(DictLike.convert('name="bibo", thres=0.5, drop=False'),
dict(name="bibo", thres=0.5, drop=False))
with self.assertRaises(ValidationError) as err:
DictLike.convert('{a=8, b}')
self.assertEqual(str(err.exception), "Value '{a=8, b}' cannot be converted into a 'DictLike'.")
def test_format(self):
self.assertEqual(DictLike.format(OrderedDict([('name', 'bibo'), ('thres', 0.5), ('drop', True)])),
"name='bibo', thres=0.5, drop=True")
def test_to_json(self):
self.assertEqual(DictLike.to_json(OrderedDict([('name', 'bibo'), ('thres', 0.5), ('drop', True)])),
"name='bibo', thres=0.5, drop=True")
def test_from_json(self):
self.assertEqual(DictLike.from_json("name='bibo', thres=0.5, drop=True"),
dict(name='bibo', thres=0.5, drop=True))
class PointLikeTest(TestCase):
"""
Test the PointLike type
"""
def test_accepts(self):
self.assertTrue(PointLike.accepts(""))
self.assertTrue(PointLike.accepts("\t\n "))
self.assertTrue(PointLike.accepts("2.4, 4.8\n"))
self.assertTrue(PointLike.accepts((2.4, 4.8)))
self.assertTrue(PointLike.accepts([2.4, 4.8]))
self.assertTrue(PointLike.accepts(Point(2.4, 4.8)))
self.assertTrue(PointLike.accepts(Point(2.4, 4.8).wkt))
self.assertFalse(PointLike.accepts("A, 4.8"))
self.assertFalse(PointLike.accepts(25.1))
def test_convert(self):
self.assertEqual(PointLike.convert(None), None)
self.assertEqual(PointLike.convert(''), None)
self.assertEqual(PointLike.convert('0.0,1.0'), Point(0.0, 1.0))
with self.assertRaises(ValidationError) as err:
PointLike.convert('0.0,abc')
self.assertEqual(str(err.exception), "Value cannot be converted into a 'PointLike': "
"Invalid geometry WKT format.")
self.assertEqual(PointLike.convert('POINT(0.0 1.0)'), Point(0.0, 1.0))
def test_format(self):
self.assertEqual(PointLike.format(Point(2.4, 4.8)), "2.4, 4.8")
class PolygonLikeTest(TestCase):
"""
Test the PolygonLike type
"""
def test_accepts(self):
self.assertTrue(PolygonLike.accepts(""))
self.assertTrue(PolygonLike.accepts(" \t"))
self.assertTrue(PolygonLike.accepts("0.0,0.0,1.1,1.1"))
self.assertTrue(PolygonLike.accepts("0.0, 0.0, 1.1, 1.1"))
coords = [(10.4, 20.2), (30.8, 20.2), (30.8, 40.8), (10.4, 40.8)]
pol = Polygon(coords)
self.assertTrue(PolygonLike.accepts(coords))
self.assertTrue(PolygonLike.accepts(pol))
self.assertTrue(PolygonLike.accepts(pol.wkt))
self.assertTrue(PolygonLike.accepts(pol.bounds))
self.assertFalse(PolygonLike.accepts("0.0,aaa,1.1,1.1"))
self.assertFalse(PolygonLike.accepts("0.0, aaa, 1.1, 1.1"))
self.assertFalse(PolygonLike.accepts([(0.0, 0.0), (0.0, 1.0), (1.0, 'aaa'), (1.0, 0.0)]))
self.assertFalse(PolygonLike.accepts([(0.0, 0.0), (0.0, 1.0), 'Guten Morgen, Berlin!', (1.0, 0.0)]))
self.assertFalse(PolygonLike.accepts(Polygon([(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0)])))
self.assertFalse(PolygonLike.accepts('MULTIPOLYGON()'))
self.assertFalse(PolygonLike.accepts('Something_in_the_month_of_May'))
self.assertFalse(PolygonLike.accepts(1.0))
def test_convert(self):
self.assertEqual(PolygonLike.convert(None), None)
self.assertEqual(PolygonLike.convert(''), None)
coords = [(10.4, 20.2), (30.8, 20.2), (30.8, 40.8), (10.4, 40.8)]
self.assertTrue(PolygonLike.convert(coords), Polygon(coords))
self.assertTrue(PolygonLike.convert([10.4, 20.2, 30.8, 40.8]), Polygon(coords))
with self.assertRaises(ValidationError) as err:
PolygonLike.convert('aaa')
self.assertEqual(str(err.exception),
"Value cannot be converted into a 'PolygonLike': "
"Invalid geometry WKT format.")
def test_format(self):
self.assertEqual(PolygonLike.format(None), '')
coords = [(10.4, 20.2), (30.8, 20.2), (30.8, 40.8), (10.4, 40.8)]
pol = PolygonLike.convert(coords)
self.assertEqual(PolygonLike.format(pol), 'POLYGON ((10.4 20.2, 30.8 20.2, 30.8 40.8, 10.4 40.8, 10.4 20.2))')
def test_json(self):
self.assertEqual(PolygonLike.from_json("-10, -10, 10, 10"), "-10, -10, 10, 10")
class GeometryLikeTest(TestCase):
def test_accepts(self):
self.assertTrue(GeometryLike.accepts("10, 10"))
self.assertTrue(GeometryLike.accepts("10, 10, 20, 20"))
self.assertTrue(GeometryLike.accepts([(10, 10), (20, 10), (20, 20), (10, 20), (10, 10)]))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("POINT (10 10)")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("POINT (10 10)")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("MULTIPOINT "
"(10 10, 20 10, 20 20, 10 20, 10 10)")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("LINESTRING "
"(10 10, 20 10, 20 20, 10 20, 10 10)")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("MULTILINESTRING "
"((10 10, 20 10, 20 20, 10 20, 10 10))")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("POLYGON "
"((10 10, 20 10, 20 20, 10 20, 10 10))")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("MULTIPOLYGON "
"(((10 10, 20 10, 20 20, 10 20, 10 10)))")))
self.assertTrue(GeometryLike.accepts(shapely.wkt.loads("GEOMETRYCOLLECTION "
"(POINT (10 10), "
"POLYGON ((10 10, 20 10, 20 20, 10 20, 10 10)))")))
self.assertTrue(GeometryLike.accepts([(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0)]))
self.assertFalse(GeometryLike.accepts("0.0,aaa,1.1,1.1"))
self.assertFalse(GeometryLike.accepts("0.0, aaa, 1.1, 1.1"))
# empty = Polygon([(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0)])
self.assertFalse(GeometryLike.accepts('MULTIPOLYGON()'))
self.assertFalse(GeometryLike.accepts('Something_in_the_month_of_May'))
self.assertFalse(GeometryLike.accepts(1.0))
def test_convert(self):
self.assertEqual(GeometryLike.convert(None), None)
self.assertEqual(GeometryLike.convert(""), None)
coords = [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]
self.assertTrue(GeometryLike.convert(coords), Polygon(coords))
with self.assertRaises(ValidationError) as err:
GeometryLike.convert('aaa')
self.assertEqual(str(err.exception), "Value cannot be converted into a 'GeometryLike': "
"Invalid geometry WKT format.")
def test_format(self):
pol = GeometryLike.convert([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
self.assertTrue(GeometryLike.format(pol), 'POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')
class TimeLikeTest(TestCase):
"""
Test the TimeLike type
"""
def test_accepts(self):
self.assertTrue(TimeLike.accepts(None))
self.assertTrue(TimeLike.accepts(''))
self.assertTrue(TimeLike.accepts(' '))
self.assertTrue(TimeLike.accepts('2001-01-01'))
self.assertTrue(TimeLike.accepts(datetime(2001, 1, 1)))
self.assertTrue(TimeLike.accepts(date(2001, 1, 1)))
self.assertFalse(TimeLike.accepts('4.3'))
self.assertFalse(TimeLike.accepts('2001-01-01,2001-02-01,'))
self.assertFalse(TimeLike.accepts([datetime(2001, 1, 1)]))
def test_convert(self):
self.assertEqual(TimeLike.convert('2017-04-19'), datetime(2017, 4, 19))
self.assertEqual(TimeLike.convert(datetime(2017, 4, 19)), datetime(2017, 4, 19))
self.assertEqual(TimeLike.convert(date(2017, 4, 19)), datetime(2017, 4, 19, 12))
self.assertEqual(TimeLike.convert(' '), None)
self.assertEqual(TimeLike.convert(None), None)
def test_format(self):
self.assertEqual(TimeLike.format(None), '')
self.assertEqual(TimeLike.format(datetime(2017, 4, 19)), '2017-04-19')
def test_json(self):
self.assertEqual(TimeLike.to_json(datetime(2017, 4, 19)), '2017-04-19')
self.assertEqual(TimeLike.from_json('2017-04-19'), datetime(2017, 4, 19))
class TimeRangeLikeTest(TestCase):
"""
Test the TimeRangeLike type
"""
def test_accepts(self):
self.assertTrue(TimeRangeLike.accepts(('2001-01-01', '2002-02-01')))
self.assertTrue(TimeRangeLike.accepts((datetime(2001, 1, 1), datetime(2002, 2, 1))))
self.assertTrue(TimeRangeLike.accepts((date(2001, 1, 1), date(2002, 1, 1))))
self.assertTrue(TimeRangeLike.accepts('2001-01-01,2002-02-01'))
self.assertTrue(TimeRangeLike.accepts('2001-01-01, 2002-02-01'))
self.assertFalse(TimeRangeLike.accepts('2001-01-01'))
self.assertFalse(TimeRangeLike.accepts([datetime(2001, 1, 1)]))
self.assertFalse(TimeRangeLike.accepts('2002-01-01, 2001-01-01'))
def test_convert(self):
self.assertEqual(TimeRangeLike.convert(None), None)
self.assertEqual(TimeRangeLike.convert((None, None)), None)
self.assertEqual(TimeRangeLike.convert([None, None]), None)
self.assertEqual(TimeRangeLike.convert(''), None)
self.assertEqual(TimeRangeLike.convert((datetime(2001, 1, 1), datetime(2002, 2, 1))),
(datetime(2001, 1, 1), datetime(2002, 2, 1)))
self.assertEqual(TimeRangeLike.convert([datetime(2001, 1, 1), datetime(2002, 2, 1)]),
(datetime(2001, 1, 1), datetime(2002, 2, 1)))
self.assertEqual(TimeRangeLike.convert('2001-01-01, 2002-01-01'),
(datetime(2001, 1, 1), datetime(2002, 1, 1, 23, 59, 59)))
self.assertEqual(TimeRangeLike.convert('2001-01-01, 2002-01-01'),
(datetime(2001, 1, 1), datetime(2002, 1, 1, 23, 59, 59)))
with self.assertRaises(ValidationError) as err:
TimeRangeLike.convert('2002-01-01, 2001-01-01')
self.assertTrue('cannot be converted into a' in str(err.exception))
def test_format(self):
self.assertEqual(TimeRangeLike.format(None), '')
self.assertEqual(TimeRangeLike.format((datetime(2001, 1, 1), datetime(2002, 1, 1))),
'2001-01-01, 2002-01-01')
self.assertEqual(TimeRangeLike.format((datetime(2001, 1, 1, 12), datetime(2002, 1, 1, 9, 30, 2))),
'2001-01-01T12:00:00, 2002-01-01T09:30:02')
class TypeNamesTest(TestCase):
"""
This test fails, if any of the expected type names change.
We use these type names in cate-desktop to map from type to validators and GUI editors.
NOTE: If one of these tests fails, we have to change the cate-desktop code w.r.t. the type name change.
"""
def test_python_primitive_type_names(self):
"""
Python primitive types
"""
self.assertEqual(object_to_qualified_name(bool), 'bool')
self.assertEqual(object_to_qualified_name(int), 'int')
self.assertEqual(object_to_qualified_name(float), 'float')
self.assertEqual(object_to_qualified_name(str), 'str')
def test_cate_cdm_type_names(self):
"""
Cate Common Data Model (CDM) types
"""
self.assertEqual(object_to_qualified_name(np.ndarray), 'numpy.ndarray')
self.assertEqual(object_to_qualified_name(xr.Dataset), 'xarray.core.dataset.Dataset')
self.assertEqual(object_to_qualified_name(xr.DataArray), 'xarray.core.dataarray.DataArray')
self.assertEqual(object_to_qualified_name(gpd.GeoDataFrame), 'geopandas.geodataframe.GeoDataFrame')
self.assertEqual(object_to_qualified_name(gpd.GeoSeries), 'geopandas.geoseries.GeoSeries')
self.assertEqual(object_to_qualified_name(pd.DataFrame), 'pandas.core.frame.DataFrame')
self.assertEqual(object_to_qualified_name(pd.Series), 'pandas.core.series.Series')
def test_cate_op_api_type_names(self):
"""
Additional Cate types used by operations API.
"""
self.assertEqual(object_to_qualified_name(VarName), 'cate.core.types.VarName')
self.assertEqual(object_to_qualified_name(VarNamesLike), 'cate.core.types.VarNamesLike')
self.assertEqual(object_to_qualified_name(PointLike), 'cate.core.types.PointLike')
self.assertEqual(object_to_qualified_name(PolygonLike), 'cate.core.types.PolygonLike')
self.assertEqual(object_to_qualified_name(GeometryLike), 'cate.core.types.GeometryLike')
self.assertEqual(object_to_qualified_name(TimeRangeLike), 'cate.core.types.TimeRangeLike')
class ArbitraryTest(TestCase):
def test_convert(self):
self.assertEqual(Arbitrary.convert(None), None)
self.assertEqual(Arbitrary.convert(434), 434)
self.assertEqual(Arbitrary.convert(3.4), 3.4)
self.assertEqual(Arbitrary.convert(True), True)
self.assertEqual(Arbitrary.convert((3, 5, 7)), (3, 5, 7))
self.assertEqual(Arbitrary.convert('abc'), 'abc')
def test_format(self):
self.assertEqual(Arbitrary.format(None), '')
self.assertEqual(Arbitrary.format(434), '434')
self.assertEqual(Arbitrary.format(3.4), '3.4')
self.assertEqual(Arbitrary.format("abc"), "abc")
self.assertEqual(Arbitrary.format(True), 'True')
class LiteralTest(TestCase):
def test_convert(self):
self.assertEqual(Literal.convert(''), None)
self.assertEqual(Literal.convert('None'), None)
self.assertEqual(Literal.convert('434'), 434)
self.assertEqual(Literal.convert('3.4'), 3.4)
self.assertEqual(Literal.convert('True'), True)
self.assertEqual(Literal.convert('"abc"'), 'abc')
# Does not work anymore in Python 3.7
# self.assertEqual(Literal.convert('2 + 6'), 8)
self.assertEqual(Literal.convert('[3, 5, 7]'), [3, 5, 7])
self.assertEqual(Literal.convert('(3, 5, 7)'), (3, 5, 7))
with self.assertRaises(ValidationError):
Literal.convert('[1,2')
with self.assertRaises(ValidationError):
Literal.convert('abc')
def test_format(self):
self.assertEqual(Literal.format(None), '')
self.assertEqual(Literal.format(434), '434')
self.assertEqual(Literal.format(3.4), '3.4')
self.assertEqual(Literal.format("abc"), "'abc'")
self.assertEqual(Literal.format(True), 'True')
self.assertEqual(Literal.format([1, 2, 3]), '[1, 2, 3]')
class DatasetLikeTest(TestCase):
def test_convert(self):
self.assertEqual(DatasetLike.convert(None), None)
data = {'time': ['2000-01-01', '2000-01-02', '2000-01-03'],
'c1': [4, 5, 6],
'c2': [6, 7, 8]}
pd_ds = pd.DataFrame(data=data)
pd_ds = pd_ds.set_index('time')
pd_ds.index = pd.to_datetime(pd_ds.index)
xr_ds = xr.Dataset(data_vars=data)
self.assertIsInstance(DatasetLike.convert(xr_ds), xr.Dataset)
self.assertIsInstance(DatasetLike.convert(pd_ds), xr.Dataset)
with self.assertRaises(ValidationError):
DatasetLike.convert(42)
def test_format(self):
self.assertEqual(DatasetLike.format(None), '')
with self.assertRaises(ValidationError):
data = {'v1': [4, 5, 6], 'v2': [6, 7, 8]}
DatasetLike.format(xr.Dataset(data_vars=data))
class DataFrameLikeTest(TestCase):
def test_convert(self):
self.assertEqual(DataFrameLike.convert(None), None)
data = {'c1': [4, 5, 6], 'c2': [6, 7, 8]}
xr_ds = xr.Dataset(data_vars=data)
pd_ds = pd.DataFrame(data=data)
gdf_ds = gpd.GeoDataFrame.from_features(read_test_features())
proxy_gdf_ds = GeoDataFrame.from_features(read_test_features())
self.assertIsInstance(DataFrameLike.convert(xr_ds), pd.DataFrame)
self.assertIsInstance(DataFrameLike.convert(pd_ds), pd.DataFrame)
self.assertIs(DataFrameLike.convert(pd_ds), pd_ds)
self.assertIsInstance(DataFrameLike.convert(gdf_ds), gpd.GeoDataFrame)
self.assertIs(DataFrameLike.convert(gdf_ds), gdf_ds)
self.assertIsInstance(DataFrameLike.convert(proxy_gdf_ds), GeoDataFrame)
self.assertIs(DataFrameLike.convert(proxy_gdf_ds), proxy_gdf_ds)
with self.assertRaises(ValidationError):
DataFrameLike.convert(42)
def test_format(self):
self.assertEqual(DataFrameLike.format(None), '')
with self.assertRaises(ValidationError):
data = {'c1': [4, 5, 6], 'c2': [6, 7, 8]}
DataFrameLike.format(pd.DataFrame(data=data))
class TestGeoDataFrame(TestCase):
def test_compat_with_geopandas(self):
features = read_test_features()
gdf = GeoDataFrame.from_features(features)
self.assertIs(type(gdf), GeoDataFrame)
self.assertIsInstance(gdf, GeoDataFrame)
self.assertIsInstance(gdf, gpd.GeoDataFrame)
self.assertIsInstance(gdf, pd.DataFrame)
self.assertIs(gdf.features, features)
self.assertIsInstance(gdf['A'], pd.Series)
self.assertIsInstance(gdf.geometry, gpd.GeoSeries)
def test_close(self):
features = read_test_features()
gdf = GeoDataFrame.from_features(features)
self.assertIs(gdf.features, features)
self.assertIsInstance(gdf.lazy_data_frame, gpd.GeoDataFrame)
gdf.close()
self.assertIsNone(gdf.features)
self.assertIsNone(gdf.lazy_data_frame)
def test_fat_ops(self):
features = read_test_features()
gdf = GeoDataFrame.from_features(features)
self.assertIsNotNone(gdf.crs)
from cate.ops.data_frame import data_frame_min, data_frame_max
df_min = data_frame_min(gdf, 'C')
self.assertIsInstance(df_min, gpd.GeoDataFrame)
self.assertEqual(len(df_min), 1)
self.assertEqual(list(df_min.columns), ['A', 'B', 'C', 'geometry'])
self.assertIsInstance(df_min.geometry, gpd.GeoSeries)
self.assertIsNotNone(df_min.crs)
df_max = data_frame_max(gdf, 'C')
self.assertIsInstance(df_max, gpd.GeoDataFrame)
self.assertEqual(len(df_max), 1)
self.assertEqual(list(df_max.columns), ['A', 'B', 'C', 'geometry'])
self.assertIsInstance(df_max.geometry, gpd.GeoSeries)
self.assertIsNotNone(df_max.crs)
def read_test_features():
import fiona
import os
return fiona.open(os.path.join(os.path.dirname(__file__), 'test_data', 'test.geojson'))
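# Editorial note (grounded in the tests above): GeoDataFrame.from_features(features)
# keeps the raw fiona collection reachable via gdf.features, exposes a
# geopandas.GeoDataFrame through gdf.lazy_data_frame, and close() drops both
# references, as exercised by TestGeoDataFrame.test_close.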
|
mit
|