repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
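A minimal sketch of how a dump with this schema could be loaded and filtered with pandas (the file name and the filter criteria are assumptions for illustration, not part of the dataset):

import pandas as pd

# Hypothetical JSON-lines export of the table below; only the column names
# come from the header above.
df = pd.read_json("code_files.jsonl", lines=True)

# Example filter: permissively licensed test files smaller than 20 kB.
subset = df[
    df["license"].isin(["mit", "bsd-3-clause"])
    & df["path"].str.contains("test")
    & (df["size"].astype(int) < 20000)
]
print(subset[["repo_name", "path", "size"]].head())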
apdjustino/urbansim | urbansim/models/tests/test_supplydemand.py | copies: 9 | size: 4116
from __future__ import division
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import supplydemand as supdem
@pytest.fixture
def choosers():
return pd.DataFrame(
{'var1': range(5, 10),
'thing_id': ['a', 'c', 'e', 'g', 'i']})
@pytest.fixture
def alternatives():
return pd.DataFrame(
{'var2': range(10, 20),
'var3': range(20, 30),
'price_col': [1] * 10,
'zone_id': ['w', 'x', 'y', 'z', 'z', 'x', 'y', 'w', 'y', 'y']},
index=pd.Index([x for x in 'abcdefghij'], name='thing_id'))
@pytest.fixture(scope='module')
def alt_segmenter():
return 'zone_id'
class _TestLCM(object):
def apply_predict_filters(self, choosers, alternatives):
choosers = choosers.query('var1 != 7')
alternatives = alternatives.query('var2 != 14')
return choosers, alternatives
def summed_probabilities(self, choosers, alternatives):
return pd.Series(
[1, 0.25, 1, 2, 0.75, 2, 1, 1.5, 0.5],
index=['a', 'b', 'c', 'd', 'f', 'g', 'h', 'i', 'j'])
@pytest.fixture(scope='module')
def lcm():
return _TestLCM()
@pytest.fixture
def filtered(lcm, choosers, alternatives):
return lcm.apply_predict_filters(choosers, alternatives)
@pytest.fixture(scope='module')
def wxyz():
w = 1
x = 0.5
y = 1.25
z = 2
return w, x, y, z
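# Note (added for clarity): these values are the expected demand/supply ratios
# per zone implied by summed_probabilities above, e.g. zone 'x' has summed
# demand 0.25 + 0.75 = 1 across its 2 filtered alternatives, giving 0.5.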
def test_calculate_adjustment_clips(lcm, filtered, alt_segmenter):
clip = 1
choosers, alternatives = filtered
alts_multiplier, submarkets_multiplier, finished = \
supdem._calculate_adjustment(
lcm, choosers, alternatives, alternatives[alt_segmenter],
clip, clip)
pdt.assert_series_equal(
alts_multiplier, pd.Series([1] * 9, index=alternatives.index),
check_dtype=False)
pdt.assert_series_equal(
submarkets_multiplier, pd.Series([1] * 4, index=['w', 'x', 'y', 'z']),
check_dtype=False)
def test_calculate_adjustment(lcm, filtered, alt_segmenter, wxyz):
clip_low = 0
clip_high = 2
choosers, alternatives = filtered
alts_multiplier, submarkets_multiplier, finished = \
supdem._calculate_adjustment(
lcm, choosers, alternatives, alternatives[alt_segmenter],
clip_low, clip_high)
w, x, y, z = wxyz
pdt.assert_series_equal(
alts_multiplier,
pd.Series([w, x, y, z, x, y, w, y, y],
index=alternatives.index))
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']))
def test_supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, filtered, wxyz):
clip_low = 0
clip_high = 2
price_col = 'price_col'
w, x, y, z = wxyz
filtered_choosers, filtered_alts = filtered
new_price, submarkets_multiplier = supdem.supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
clip_change_low=clip_low, clip_change_high=clip_high)
pdt.assert_series_equal(
new_price,
pd.Series(
[w, x, y, z, x, y, w, y, y],
index=filtered_alts.index, name='price_col') ** 5)
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']) ** 5)
def test_supply_and_demand_base_ratio(
lcm, choosers, alternatives, alt_segmenter, filtered, wxyz):
clip_low = 0
clip_high = 2
price_col = 'price_col'
w, x, y, z = wxyz
filtered_choosers, filtered_alts = filtered
base_multiplier = pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z'])
new_price, submarkets_multiplier = supdem.supply_and_demand(
lcm, choosers, alternatives, alt_segmenter, price_col,
base_multiplier, clip_low, clip_high)
pdt.assert_series_equal(
new_price,
pd.Series(
[w, x, y, z, x, y, w, y, y],
index=filtered_alts.index, name='price_col') ** 6)
pdt.assert_series_equal(
submarkets_multiplier,
pd.Series([w, x, y, z], index=['w', 'x', 'y', 'z']) ** 6)
license: bsd-3-clause

JeanKossaifi/scikit-learn | sklearn/utils/tests/test_validation.py | copies: 79 | size: 18547
"""Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
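# (note: arr_sym is the symmetrized form of arr_asym, i.e. (arr_asym + arr_asym.T) / 2)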
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
license: bsd-3-clause

dropofwill/author-attr-experiments | sklearn_pipeline_SVM.py | copies: 1 | size: 2809
from __future__ import print_function
from pprint import pprint
from time import time
import logging
import numpy as np
import os
import time as tm
from sklearn import datasets
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
###############################################################################
docs = datasets.load_files(container_path="../../sklearn_data/problemC")
X, y = docs.data, docs.target
print(y.shape)
baseline = 1/float(len(list(np.unique(y))))
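# (illustration: with, say, 10 distinct author classes this chance-level
# baseline would be 0.1; 'improvement' below is reported relative to it)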
# define a pipeline combining a text feature extractor with a simple classifier
pipeline = Pipeline([
('vect', CountVectorizer(charset_error='ignore')),
('tfidf', TfidfTransformer(use_idf=True)),
('clf', MultinomialNB())
])
# features to cross-check
parameters = {
'vect__max_df': (0.75, 1.0),
'vect__max_features': (None, 100, 5000, 10000),
'vect__analyzer' : ('char', 'word'),
'vect__ngram_range': ((1, 1), (1, 2), (2,3)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (1.0, 0.5, 0.000001),
#'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 100),
}
# classifier
grid_search = GridSearchCV(pipeline, parameters, verbose=1)
print("Performing grid search...")
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(X, y)
print("done in %0.3fs" % (time() - t0))
print()
improvement = (grid_search.best_score_ - baseline) / baseline
print("Best score: %0.3f" % grid_search.best_score_)
print("Baseline score: %0.3f" % baseline)
print("Improved: %0.3f over baseline" % improvement)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
sub_dir = "Results/"
location = "results" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open( os.path.join(sub_dir, location), 'w+') as myFile:
myFile.write("Best score: %0.3f \n" % grid_search.best_score_)
myFile.write("Baseline score: %0.3f \n" % baseline)
myFile.write("Best parameters set: \n")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
myFile.write("\t%s: %r \n" % (param_name, best_parameters[param_name]))
myFile.write("All parameters tried: \n")
all_parameters = grid_search.grid_scores_
for params, mean_score, scores in sorted(grid_search.grid_scores_):
myFile.write("\t \t %0.3f (+/-%0.03f) for %r \n" % (mean_score, scores.std() / 2, params))
license: unlicense

janusnic/21v-python | unit_20/parallel_ml/00 - Tutorial Setup .py | copies: 1 | size: 2669
# coding: utf-8
# ## Tutorial Setup
# ### Check your install
# In[55]:
import numpy
# In[56]:
import scipy
# In[57]:
import matplotlib
# In[58]:
import sklearn
# In[59]:
import psutil
# In[60]:
import pandas
# In[61]:
import IPython.parallel
# Finding the location of an installed package and its version:
# In[62]:
numpy.__path__
# In[63]:
numpy.__version__
# ### Check that you have the datasets
# In[64]:
get_ipython().magic(u'run ../fetch_data.py')
# In[1]:
import os
for fname in os.listdir('../datasets/'):
print(fname)
# ## A NumPy primer
# ### NumPy array dtypes and shapes
# In[2]:
import numpy as np
# In[3]:
a = np.array([1, 2, 3])
# In[4]:
a
# In[5]:
b = np.array([[0, 2, 4], [1, 3, 5]])
# In[6]:
b
# In[7]:
b.shape
# In[8]:
b.dtype
# In[9]:
a.shape
# In[10]:
a.dtype
# In[11]:
np.zeros(5)
# In[12]:
np.ones(shape=(3, 4), dtype=np.int32)
# ### Common array operations
# In[13]:
c = b * 0.5
# In[14]:
c
# In[15]:
c.shape
# In[16]:
c.dtype
# In[17]:
a
# In[18]:
d = a + c
# In[19]:
d
# In[20]:
d[0]
# In[21]:
d[0, 0]
# In[22]:
d[:, 0]
# In[23]:
d.sum()
# In[24]:
d.mean()
# In[25]:
d.sum(axis=0)
# In[26]:
d.mean(axis=1)
# ### Reshaping and inplace update
# In[27]:
e = np.arange(12)
# In[28]:
e
# In[29]:
f = e.reshape(3, 4)
# In[30]:
f
# In[31]:
e
# In[32]:
e[5:] = 0
# In[33]:
e
# In[34]:
f
# ### Combining arrays
# In[35]:
a
# In[36]:
b
# In[37]:
d
# In[38]:
np.concatenate([a, a, a])
# In[39]:
np.vstack([a, b, d])
# In[40]:
np.hstack([b, d])
# ## A Matplotlib primer
# In[41]:
get_ipython().magic(u'matplotlib inline')
# In[42]:
import matplotlib.pyplot as plt
# In[43]:
x = np.linspace(0, 2, 10)
# In[44]:
x
# In[45]:
plt.plot(x, 'o-');
# In[46]:
plt.plot(x, x, 'o-', label='linear')
plt.plot(x, x ** 2, 'x-', label='quadratic')
plt.legend(loc='best')
plt.title('Linear vs Quadratic progression')
plt.xlabel('Input')
plt.ylabel('Output');
# In[47]:
samples = np.random.normal(loc=1.0, scale=0.5, size=1000)
# In[48]:
samples.shape
# In[49]:
samples.dtype
# In[50]:
samples[:30]
# In[51]:
plt.hist(samples, bins=50);
# In[52]:
samples_1 = np.random.normal(loc=1, scale=.5, size=10000)
samples_2 = np.random.standard_t(df=10, size=10000)
# In[53]:
bins = np.linspace(-3, 3, 50)
_ = plt.hist(samples_1, bins=bins, alpha=0.5, label='samples 1')
_ = plt.hist(samples_2, bins=bins, alpha=0.5, label='samples 2')
plt.legend(loc='upper left');
# In[54]:
plt.scatter(samples_1, samples_2, alpha=0.1);
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
license: mit

jrbourbeau/cr-composition | processing/legacy/anisotropy/random_trials/save_pvals.py | copies: 2 | size: 2479
#!/usr/bin/env python
import os
import argparse
import numpy as np
import healpy as hp
from scipy.stats import ks_2samp
import pandas as pd
import comptools as comp
import comptools.anisotropy.anisotropy as anisotropy
if __name__ == "__main__":
p = argparse.ArgumentParser(
description='Compares projected relative intensity distributions between two samples of map files and saves the KS-test results')
p.add_argument('--infiles_sample_0', dest='infiles_sample_0', nargs='*',
help='Input reference map files')
p.add_argument('--infiles_sample_1', dest='infiles_sample_1', nargs='*',
help='Input reference map files')
p.add_argument('--outfile', dest='outfile',
help='Output DataFrame file')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
'if it already exists')
args = p.parse_args()
if args.infiles_sample_0 is None or args.infiles_sample_1 is None:
raise ValueError('Input files must be specified')
elif len(args.infiles_sample_0) != len(args.infiles_sample_1):
raise ValueError('Both samples of input files must be the same length')
if args.outfile is None:
raise ValueError('Outfile must be specified')
else:
comp.check_output_dir(args.outfile)
data_dict = {'ks_statistic': [], 'pval': []}
# Read in all the input maps
kwargs_relint = {'smooth': 20, 'scale': None, 'decmax': -55}
for file_0, file_1 in zip(args.infiles_sample_0, args.infiles_sample_1):
relint_0 = anisotropy.get_map(files=file_0, name='relint', **kwargs_relint)
relint_1 = anisotropy.get_map(files=file_1, name='relint', **kwargs_relint)
ri_0, ra, ra_err = anisotropy.get_proj_relint(relint_0, n_bins=100)
ri_1, ra, ra_err = anisotropy.get_proj_relint(relint_1, n_bins=100)
print('Comparing:')
print('ri_0 = {}'.format(ri_0))
print('ri_1 = {}\n'.format(ri_1))
ks_statistic, pval = ks_2samp(ri_0, ri_1)
print('ks_statistic = {}'.format(ks_statistic))
print('pval = {}\n\n'.format(pval))
data_dict['ks_statistic'].append(ks_statistic)
data_dict['pval'].append(pval)
with pd.HDFStore(args.outfile) as output_store:
dataframe = pd.DataFrame(data_dict)
output_store.put('dataframe', dataframe, format='table', data_columns=True)
license: mit

ZallenLab/denticleorganization | statistical_modeling_PYTHON/StatisticalModel_DenticleOrganization_only.py | copies: 1 | size: 16515
# Statistical model for denticle organization
#
# For more details, see [Spencer, Schaumberg and Zallen, Molecular Biology of the Cell, 2017](https://www.ncbi.nlm.nih.gov/pubmed/28404752). If this code has been helpful to you, please cite our paper.
#
# Basic methodology:
# The equation L = α*D + D(N-1) + α*D describes the general case for determining the distance between denticles (D) given the known cell length (L) and the number of denticles (N) in the cell, where α is the spacing ratio (the ratio of the average denticle-to-cell-edge distance to the average denticle-to-denticle distance).
#
# The ideal position of each denticle n_i can be described by the equation position_i = (α + n_i - 1) / (2α + N - 1) where α is the spacing ratio and N is the total number of denticles in the cell.
#
# This program generates simulated denticle positions for a dataset comprising (L,N,D) according to a series of truncated normal distributions with μ = the ideal position (calculated from the above equation) and σ = D/{a specified set of divisors}. Denticle spacing distances are calculated from the positional data. Simulated spacing distances are then compared to the in vivo spacing data.
from __future__ import division
from sys import argv
import numpy as np
import pandas as pd
import scipy.stats as sps
# import datetime as dt
import time
import string
import csv
# Functions
def IdealPositionMu(celllength, dentnumber, alpha):
# calculates the ideal position(s) of denticle(s) in a cell with length = celllength
distances = []
for dent in range(1,dentnumber+1):
distances.append(((alpha + dent - 1) / (2*alpha + dentnumber - 1)) * celllength)
return distances
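# Worked example (added for illustration, not part of the original analysis):
# with celllength = 100, dentnumber = 2 and alpha = 0.5 the formula
# (alpha + n - 1) / (2*alpha + N - 1) * L gives [25.0, 75.0], i.e. each
# denticle sits a quarter of the cell length in from its nearest edge.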
def AddByLine(datatoadd, addto_list, frame, idx):
datatoadd = np.array(datatoadd)
datatoadd.T
for line in datatoadd:
addto_list.append(tuple(line))
tempseries = pd.Series(addto_list,index=idx)
return frame.append(tempseries,ignore_index=True)
def SigmasCalculator(celllength, dentnumber, alphas, stdevnumbers):
alphas = np.array(alphas)
# stdevnumbers = np.array([[5], [6], [7], [8]])
sigma_lengths = np.zeros((len(stdevnumbers),len(alphas)))
sigma_lengths = (celllength / (2*alphas + dentnumber -1)) / stdevnumbers
# sigmasFrame = pd.DataFrame(sigma_lengths)
return sigma_lengths #, sigmasFrame
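# Worked example (illustrative numbers): for celllength = 100, dentnumber = 2
# and alpha = 0.5, the ideal spacing D = 100 / (2*0.5 + 2 - 1) = 50, so the
# divisors [5, 6, 7, 8, 9] give sigmas of 10.0, 8.33, 7.14, 6.25 and 5.56.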
def GenerateDistributions(mus, sigmas, celllength):
# generates values from a truncated normal distribution (adjusted to account for cell length - denticles cannot be placed outside of the cell boundaries, ie less than 0 or more than celllength)
# mus is a tuple or list, sigmas is a series or dict, celllength and dentnumber are values
positions = np.zeros((len(mus), len(sigmas)))
for s, sigma in enumerate(sigmas):
for m, mu in enumerate(mus):
lower, upper = 0, celllength
positions[m,s] = sps.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma)
positions = np.sort(positions,axis=0)
# sort by rows to order denticle positions from smallest to largest
distances = positions[1:,:] - positions[0:-1,:]
return positions.T, distances.T
# each row is an entire cell with all the denticles
# each row of the returned arrays hold positions for a different value of mu, columns correspond to different sigmas
# arr.T : arr :
# [m1] [m2] [m3] [m4] # [5] [6] [7] [8]
# [5] a a a a # [m1] a a a a
# [6] b b b b # [m2] b b b b
# [7] c c c c # [m3] c c c c
# [8] d d d d # [m4] d d d d
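# Usage sketch (illustrative values, not from the dataset): for a 100-unit
# cell with 2 denticles, calling
# GenerateDistributions(IdealPositionMu(100, 2, 0.5), [10.0, 8.33], 100)
# returns a 2x2 positions array (one row per sigma after the transpose) and a
# 2x1 distances array holding the single denticle-to-denticle spacing per sigma.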
def SliceSaver(array,names):
# takes a 3d array, slices by z axis, saves each slice as a csv
for dex, arr in enumerate(array):
np.savetxt(genotype +'_'+ names[dex] + '.csv', arr, delimiter=',')
def NameMaker(idx1, addon):
names = []
for i, elem in enumerate(idx1):
names.append(str(elem) + '_' + addon + time.strftime("%Y%m%d_%H%M%S"))
# for windows
# names.append(str(elem) + '_' + addon)
return names
def IndivStatTest(array, names, filename_out):
# IN: 3D np array, list of strings with length=arr[X,:,:] (array axis 0), name of csv file
for dex, section in enumerate(array):
for itnumber, iterationdata in enumerate(section.T):
simdata = iterationdata[np.nonzero(iterationdata)]
test_ks = sps.ks_2samp(invivo_d, simdata)
test_mwu = sps.mannwhitneyu(invivo_d[np.nonzero(invivo_d)], simdata)
with open(filename_out, 'a') as f:
csv.writer(f).writerows([[itnumber, names[dex], test_ks[0], test_ks[1], TestPasses(test_ks[1], 0.05), test_mwu[0], test_mwu[1], TestPasses(test_mwu[1], 0.05)]])
def TestPasses(pval, cutoff):
if pval <= cutoff:
return 'different'
elif pval > cutoff:
return 'same'
def ComboStatTest(array,names,filename_out):
# IN: 3D np array, list of strings with length=arr[X,:,:] (array axis 0), name of csv file
for dex, section in enumerate(array):
simdata = section[np.nonzero(section)].flatten()
test_ks = sps.ks_2samp(invivo_d, simdata)
test_mwu = sps.mannwhitneyu(invivo_d, simdata)
with open(filename_out, 'a') as f:
csv.writer(f).writerows([['-', names[dex], test_ks[0], test_ks[1], TestPasses(test_ks[1], 0.05), test_mwu[0], test_mwu[1], TestPasses(test_mwu[1], 0.05)]])
starttime = time.clock() # returns time in seconds
# for cmd line call
script, input_file, cells_to_simulate = argv
data = pd.read_csv(input_file)
# # for jupyter notebook
# input_file, cells_to_simulate = 'yw_CellbyCell.csv', 1
# data = pd.read_csv(input_file)
# get genotype from filename
genotype = input_file.split('_')[0] # split the string at the underscore, returns a list of the parts (no underscores), take the first/before part
# clear out missing data
data = data.replace(0,np.nan) # turn zeros into NaNs
data = data.dropna(how='all') # drop any column (axis=0) or row (axis=1) where ALL values are NaN
data = data.replace(np.nan,0) # turn NaNs back into zeros, so arithmetic can be normal for 'true-zero' values
data = data[data.dentincell != 1] # drop columns where the cell has only 1 denticle
# save the invivo denticle separation distances to a 1xN matrix
invivo_d = data.as_matrix(columns=data.columns[10:]).flatten()
invivo_d = invivo_d[np.nonzero(invivo_d)]
# # using the 'absolute' DV length (dist between edge markers):
# invivo_ln = pd.DataFrame(data,columns=['dentincell','Dvlen'])
# invivo_ln.columns = ['dentincell','dvlength']
# using the summed DVlength (sum of dent-edge, dent-dent ... dent-dent, dent-edge); sum to get the additive, rather than absolute, DV length:
invivo_ln = pd.DataFrame(data,columns=['dentincell'])
invivo_ln['dvlength'] = data['dentEdgeL'] + data['dentEdgeR'] + (data[data.columns[10:]]).sum(axis=1,skipna=True,numeric_only=True)
# invivo_ln['dentincell'] = invivo_ln['dentincell'].astype(int)
invivo_ln = invivo_ln.T
# data = None # clear data from memory
# Constants and basic indicies
maxDent = int(max(invivo_ln.ix['dentincell']))
alphas = (5/10, 6/10, 2/3, 7/10, 3/4, 8/10, 9/10, 1)
idx_alphas = ['halfD','sixtenthsD','twothirdsD','sevententhsD','threequartersD','eighttenthsD','ninetenthsD','oneD']
basicinfo = ['dvlength','dentincell']
idx_positions = basicinfo + idx_alphas
# define arrays for iteration data grouped by sigma value
# alphas iterations cells in dataset * max denticles
# sd_five = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_six = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_seven = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_eight = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# numberofstdevs = np.array([[5], [6], [7], [8]])
# idx_sigmas = ['five','six','seven','eight']
# # define arrays for iteration data grouped by sigma value
# # alphas iterations cells in dataset * max denticles
# sd_five = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_six = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_seven = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_eight = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
#
# sdarrs = [sd_five, sd_six, sd_seven, sd_eight]
numberofstdevs = np.array([[5], [6], [7], [8], [9]])
idx_sigmas = ['five','six','seven','eight','nine']
# sd_five = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_six = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_seven = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_eight = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sd_nine = np.zeros((len(idx_alphas), len(invivo_ln.T)*(int(maxDent)-1), int(cells_to_simulate)))
# sdarrs = [sd_five, sd_six, sd_seven, sd_eight, sd_nine]
# MAIN
# initial calculations of the ideal positions and sigmas
positionframe = pd.DataFrame()
sigmaframe = pd.DataFrame()
for dex, row in enumerate(invivo_ln):
celllength = invivo_ln[row]['dvlength']
dentnumber = invivo_ln[row]['dentincell'].astype(int)
# calculate mu (ideal positions); generates a len(invivo_ln) x 10 DataFrame
positionlist = []
for alpha in alphas:
positionlist.append(IdealPositionMu(celllength,dentnumber,alpha))
positionframe = AddByLine(positionlist, [celllength, dentnumber], positionframe, idx_positions)
# calculate sigmas; generates a multi-indexed len(invivo_ln)*len(sigmas) x 8 DataFrame
sigmaframe = pd.concat([sigmaframe,
pd.DataFrame(SigmasCalculator(celllength,dentnumber,alphas,numberofstdevs),
index=[[dex]*len(idx_sigmas),idx_sigmas],columns=idx_alphas)])
positionframe = positionframe[idx_positions]
sigmaframe.index.names = ['cell','numbSDs']
sigmaframe.columns.names = ['alpha']
positionframe.index.names = ['cell']
# positionframe.to_csv('ideal_positions_' + dt.datetime.isoformat(dt.datetime.now()) + '_' + str(time.clock() - starttime) + '.csv')
# sigmaframe.to_csv('sigmavalues_' + dt.datetime.isoformat(dt.datetime.now()) + '_' + str(time.clock() - starttime) + '.csv')
positionframe.to_csv('ideal_positions.csv')
sigmaframe.to_csv('sigmavalues.csv')
# print('initial calcs',time.clock() - starttime)
initcalctime = time.clock() - starttime
# generate random positions
for iteration in range(0, int(cells_to_simulate)):
ittime = time.clock()
# number of cells each (alpha,sigma) pair + L + N denticles possible
randompositions = np.zeros((len(invivo_ln.T), (len(idx_sigmas) * len(idx_alphas) + 2), maxDent))
randomdistances = np.zeros((len(invivo_ln.T), (len(idx_sigmas) * len(idx_alphas) + 2), maxDent))
for cellnumber, __ in enumerate(positionframe.T):
celllength, dentnumber = positionframe.iloc[cellnumber]['dvlength'], int(positionframe.iloc[cellnumber]['dentincell'])
randomdistances[cellnumber, 0, :] = positionframe.iloc[cellnumber]['dvlength']
randomdistances[cellnumber, 1, :] = positionframe.iloc[cellnumber]['dentincell']
randompositions[cellnumber, 0, :] = positionframe.iloc[cellnumber]['dvlength']
randompositions[cellnumber, 1, :] = positionframe.iloc[cellnumber]['dentincell']
for alphano, elem in enumerate(idx_alphas):
positionset = positionframe.iloc[cellnumber][elem]
sigmaset = sigmaframe.ix[positionframe.iloc[cellnumber].name][elem]
positions, distances = GenerateDistributions(positionframe.iloc[cellnumber][elem], sigmaframe.ix[positionframe.iloc[cellnumber].name][elem], celllength)
randompositions[cellnumber, (len(idx_sigmas)*alphano)+len(basicinfo) : (len(idx_sigmas)*alphano + len(idx_sigmas))+len(basicinfo), 0:dentnumber] = positions
randomdistances[cellnumber, (len(idx_sigmas)*alphano)+len(basicinfo) : (len(idx_sigmas)*alphano + len(idx_sigmas))+len(basicinfo), 0:dentnumber-1] = distances
# sd_five[alphano, (dentnumber-1)*cellnumber : (dentnumber-1)*cellnumber+(dentnumber-1), iteration] = distances[0,:]
# sd_six[alphano, (dentnumber-1)*cellnumber : (dentnumber-1)*cellnumber+(dentnumber-1), iteration] = distances[1,:]
# sd_seven[alphano, (dentnumber-1)*cellnumber : (dentnumber-1)*cellnumber+(dentnumber-1), iteration] = distances[2,:]
# sd_eight[alphano, (dentnumber-1)*cellnumber : (dentnumber-1)*cellnumber+(dentnumber-1), iteration] = distances[3,:]
# print('iteration time',time.clock() - ittime)
# items: axis 0, each item corresponds to a DataFrame contained inside; major_axis: axis 1, it is the index (rows) of each of the DataFrames; minor_axis: axis 2, it is the columns of each of the DataFrames
randompositionpanel = pd.Panel(randompositions,
major_axis=[list(np.repeat('basicinfo',2)) + list(np.repeat(idx_alphas, len(idx_sigmas))), ['celllength','dentnumber'] + list(idx_sigmas * len(idx_alphas))],
minor_axis=list(string.ascii_lowercase[:maxDent]))
randomdistancepanel = pd.Panel(randomdistances,
major_axis=[list(np.repeat('basicinfo',2)) + list(np.repeat(idx_alphas, len(idx_sigmas))), ['celllength','dentnumber'] + list(idx_sigmas * len(idx_alphas))],
minor_axis=list(string.ascii_lowercase[:maxDent]))
randompositionframe = randompositionpanel.to_frame()
randomdistanceframe = randomdistancepanel.to_frame()
elapsedtime = time.clock() - ittime
randompositionframe.to_csv(genotype + '_iteration' + str(iteration) + '_random_positions_' + time.strftime("%Y%m%d_%H%M%S") + '_iterationtime=' + str(elapsedtime) + '.csv')
randomdistanceframe.to_csv(genotype + '_iteration' + str(iteration) + '_random_distances_' + time.strftime("%Y%m%d_%H%M%S") + '_iterationtime=' + str(elapsedtime) + '.csv')
# for dex, varname in enumerate(sdarrs):
# SliceSaver(varname,NameMaker(idx_alphas,idx_sigmas[dex]))
# IndivStatTest(varname, NameMaker(idx_alphas, idx_sigmas[dex]), genotype + '_statistics_' + str(dex) + idx_sigmas[dex] + time.strftime("%Y%m%d_%H%M%S") + '.csv')
# ComboStatTest(varname, NameMaker(idx_alphas, idx_sigmas[dex]), genotype + '_statistics_' + str(dex) + idx_sigmas[dex] + time.strftime("%Y%m%d_%H%M%S") + '.csv')
# # for windows
# IndivStatTest(varname, NameMaker(idx_alphas, idx_sigmas[dex]), genotype + '_statistics_' + str(dex) + idx_sigmas[dex] + '.csv')
# ComboStatTest(varname, NameMaker(idx_alphas, idx_sigmas[dex]), genotype + '_statistics_' + str(dex) + idx_sigmas[dex] + '.csv')
endtime = time.clock()
# # save data from each alpha,sigma combo as a separate file
# SliceSaver(sd_five,NameMaker(idx_alphas,'five'))
# SliceSaver(sd_six,NameMaker(idx_alphas,'six'))
# SliceSaver(sd_seven,NameMaker(idx_alphas,'seven'))
# SliceSaver(sd_eight,NameMaker(idx_alphas,'eight'))
# IndivStatTest(sd_five, NameMaker(idx_alphas, 'five'), genotype + '_statistics_five.csv')
# ComboStatTest(sd_five, NameMaker(idx_alphas, 'five'), genotype + '_statistics_five.csv')
# IndivStatTest(sd_six, NameMaker(idx_alphas, 'six'), genotype + '_statistics_six.csv')
# ComboStatTest(sd_six, NameMaker(idx_alphas, 'six'), genotype + '_statistics_six.csv')
# IndivStatTest(sd_seven, NameMaker(idx_alphas, 'seven'), genotype + '_statistics_seven.csv')
# ComboStatTest(sd_seven, NameMaker(idx_alphas, 'seven'), genotype + '_statistics_seven.csv')
# IndivStatTest(sd_eight, NameMaker(idx_alphas, 'eight'), genotype + '_statistics_eight.csv')
# ComboStatTest(sd_eight, NameMaker(idx_alphas, 'eight'), genotype + '_statistics_eight.csv')
# print('iteration time',elapsedtime)
# print('total time',endtime-starttime)
license: gpl-3.0

rrohan/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | copies: 258 | size: 2861
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
license: bsd-3-clause

zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/testing/jpl_units/Duration.py | copies: 8 | size: 6762
#===========================================================================
#
# Duration
#
#===========================================================================
"""Duration module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class Duration(object):
"""Class Duration in development.
"""
allowed = [ "ET", "UTC" ]
#-----------------------------------------------------------------------
def __init__( self, frame, seconds ):
"""Create a new Duration object.
= ERROR CONDITIONS
- If the input frame is not in the allowed list, an error is thrown.
= INPUT VARIABLES
- frame The frame of the duration. Must be 'ET' or 'UTC'
- seconds The number of seconds in the Duration.
"""
if frame not in self.allowed:
msg = "Input frame '%s' is not one of the supported frames of %s" \
% ( frame, str( self.allowed ) )
raise ValueError( msg )
self._frame = frame
self._seconds = seconds
#-----------------------------------------------------------------------
def frame( self ):
"""Return the frame the duration is in."""
return self._frame
#-----------------------------------------------------------------------
def __abs__( self ):
"""Return the absolute value of the duration."""
return Duration( self._frame, abs( self._seconds ) )
#-----------------------------------------------------------------------
def __neg__( self ):
"""Return the negative value of this Duration."""
return Duration( self._frame, -self._seconds )
#-----------------------------------------------------------------------
def seconds( self ):
"""Return the number of seconds in the Duration."""
return self._seconds
#-----------------------------------------------------------------------
def __nonzero__( self ):
"""Compare two Durations.
= INPUT VARIABLES
- rhs The Duration to compare against.
= RETURN VALUE
- Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
"""
return self._seconds != 0
if six.PY3:
__bool__ = __nonzero__
#-----------------------------------------------------------------------
def __cmp__( self, rhs ):
"""Compare two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to compare against.
= RETURN VALUE
- Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
"""
self.checkSameFrame( rhs, "compare" )
return cmp( self._seconds, rhs._seconds )
#-----------------------------------------------------------------------
def __add__( self, rhs ):
"""Add two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to add.
= RETURN VALUE
- Returns the sum of ourselves and the input Duration.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
if isinstance( rhs, U.Epoch ):
return rhs + self
self.checkSameFrame( rhs, "add" )
return Duration( self._frame, self._seconds + rhs._seconds )
#-----------------------------------------------------------------------
def __sub__( self, rhs ):
"""Subtract two Durations.
= ERROR CONDITIONS
- If the input rhs is not in the same frame, an error is thrown.
= INPUT VARIABLES
- rhs The Duration to subtract.
= RETURN VALUE
- Returns the difference of ourselves and the input Duration.
"""
self.checkSameFrame( rhs, "sub" )
return Duration( self._frame, self._seconds - rhs._seconds )
#-----------------------------------------------------------------------
def __mul__( self, rhs ):
"""Scale a UnitDbl by a value.
= INPUT VARIABLES
- rhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( rhs ) )
#-----------------------------------------------------------------------
def __rmul__( self, lhs ):
"""Scale a Duration by a value.
= INPUT VARIABLES
- lhs The scalar to multiply by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds * float( lhs ) )
#-----------------------------------------------------------------------
def __div__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, self._seconds / float( rhs ) )
#-----------------------------------------------------------------------
def __rdiv__( self, rhs ):
"""Divide a Duration by a value.
= INPUT VARIABLES
- rhs The scalar to divide by.
= RETURN VALUE
- Returns the scaled Duration.
"""
return Duration( self._frame, float( rhs ) / self._seconds )
#-----------------------------------------------------------------------
def __str__( self ):
"""Print the Duration."""
return "%g %s" % ( self._seconds, self._frame )
#-----------------------------------------------------------------------
def __repr__( self ):
"""Print the Duration."""
return "Duration( '%s', %g )" % ( self._frame, self._seconds )
#-----------------------------------------------------------------------
def checkSameFrame( self, rhs, func ):
"""Check to see if frames are the same.
= ERROR CONDITIONS
- If the frame of the rhs Duration is not the same as our frame,
an error is thrown.
= INPUT VARIABLES
- rhs The Duration to check for the same frame
- func The name of the function doing the check.
"""
if self._frame != rhs._frame:
msg = "Cannot %s Duration's with different frames.\n" \
"LHS: %s\n" \
"RHS: %s" % ( func, self._frame, rhs._frame )
raise ValueError( msg )
#===========================================================================
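# Example usage (added for illustration; not part of the original module):
#   d1 = Duration('ET', 120.0)
#   d2 = Duration('ET', 30.0)
#   d1 + d2      -> Duration( 'ET', 150 )
#   abs(-d1)     -> Duration( 'ET', 120 )
#   d1 / 4.0     -> Duration( 'ET', 30 )
#   Duration('ET', 10) + Duration('UTC', 10) raises ValueError (frame mismatch).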
license: apache-2.0

HolgerPeters/scikit-learn | examples/cluster/plot_dbscan.py | copies: 346 | size: 2479
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
license: bsd-3-clause

Starkiller4011/tsat | tsat/sf/astro_sf.py | copies: 1 | size: 1552
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
#####################################
# ╔╗ ┬ ┬ ┬┌─┐ ╔╦╗┌─┐┌┬┐ #
# ╠╩╗│ │ │├┤ ║║│ │ │ #
# ╚═╝┴─┘└─┘└─┘ ═╩╝└─┘ ┴ #
# ╔═╗┌─┐┌─┐┌┬┐┬ ┬┌─┐┬─┐┌─┐ #
# ╚═╗│ │├┤ │ │││├─┤├┬┘├┤ #
# ╚═╝└─┘└ ┴ └┴┘┴ ┴┴└─└─┘ #
#####################################
Author: Derek Blue
"""
# Imports
import warnings
try:
import matplotlib.pyplot as plt
from matplotlib import rcParams as rcp
from matplotlib import ticker as tk
except ImportError:
print "Matplotlib not installed, try installing Annaconda or pip install matplotlib"
import sys
sys.exit()
import sf_classes as sfc
import sf_methods as sfm
# For now
warnings.filterwarnings("ignore")
def __main__():
########################################
root_path = '/home/dblue/Documents/School/Summer2017/PY-SF/astroSF/Data/VEGA/'
file_name = 'ngc7469_opt-4845A.lc'
headers = True
source = 'ngc7469'
band = 'OP'
freq = 1315
########################################
file_path = root_path + file_name
attribs = [source, band, freq]
########################################
test = sfc.LightCurve('./RAW/TESTING/ark564.lc',
False, ['ark564', 'XR', 10])
test.plot()
__main__()
license: mit

rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/pyRiemann-0.2.2/pyriemann/tangentspace.py | copies: 2 | size: 2127
from .utils.mean import mean_covariance
from .utils.tangentspace import tangent_space, untangent_space
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.lda import LDA
class TangentSpace(BaseEstimator, TransformerMixin):
def __init__(self, metric='riemann', tsupdate=False):
self.metric = metric
self.tsupdate = tsupdate
def fit(self, X, y=None):
# compute mean covariance
self.Cr = mean_covariance(X, metric=self.metric)
return self
def transform(self, X):
if self.tsupdate:
Cr = mean_covariance(X, metric=self.metric)
else:
Cr = self.Cr
return tangent_space(X, Cr)
def fit_transform(self, X, y=None):
# compute mean covariance
self.Cr = mean_covariance(X, metric=self.metric)
return tangent_space(X, self.Cr)
def inverse_transform(self, X, y=None):
return untangent_space(X, self.Cr)
########################################################################
class FGDA(BaseEstimator, TransformerMixin):
def __init__(self, metric='riemann', tsupdate=False):
self.metric = metric
self.tsupdate = tsupdate
self._ts = TangentSpace(metric=metric, tsupdate=tsupdate)
def _fit_lda(self, X, y):
self.classes = numpy.unique(y)
self._lda = LDA(
n_components=len(
self.classes) - 1,
solver='lsqr',
shrinkage='auto')
ts = self._ts.fit_transform(X)
self._lda.fit(ts, y)
W = self._lda.coef_.copy()
self._W = numpy.dot(
numpy.dot(W.T, numpy.linalg.pinv(numpy.dot(W, W.T))), W)
return ts
def _retro_project(self, ts):
ts = numpy.dot(ts, self._W)
return self._ts.inverse_transform(ts)
def fit(self, X, y=None):
self._fit_lda(X, y)
return self
def transform(self, X):
ts = self._ts.transform(X)
return self._retro_project(ts)
def fit_transform(self, X, y=None):
ts = self._fit_lda(X, y)
return self._retro_project(ts)
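# Usage sketch (added for illustration; assumes X is an array of symmetric
# positive-definite covariance matrices of shape (n_trials, n_channels, n_channels)
# and y the class labels):
#   ts = TangentSpace(metric='riemann')
#   X_ts = ts.fit_transform(X)            # project covariances into tangent space
#   fgda = FGDA(metric='riemann')
#   X_filtered = fgda.fit_transform(X, y)  # geodesic filtering, back in SPD space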
license: bsd-3-clause

ua-snap/downscale | old/bin/old/cru_ts31_to_cl20_downscaling.py | copies: 2 | size: 18759
# # #
# Current implementation of the cru ts31 (ts32) delta downscaling procedure
#
# Author: Michael Lindgren ([email protected])
# # #
import numpy as np
def write_gtiff( output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and and output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
See Rasterio documentation for more details. This is just a common one that is
supported here.
RETURNS:
--------
string path to the new output_filename created
'''
	import os
	import rasterio # rasterio.open() is used below
if 'transform' in template_meta.keys():
_ = template_meta.pop( 'transform' )
	if not output_filename.endswith( '.tif' ):
		import warnings
		warnings.warn( 'output_filename does not end with ".tif", it has been fixed for you.', UserWarning )
		output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
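# Example (illustrative only, not executed by this script; paths are hypothetical
# and a single-band template raster is assumed):
#   with rasterio.open( '/path/to/template.tif' ) as tmpl:
#       meta = tmpl.meta
#       arr = tmpl.read( 1 )
#   write_gtiff( arr, meta, '/path/to/copy_of_band1.tif', compress=True )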
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
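# Example (illustrative only, not executed by this script): shift a regular
# 0..359.5-degree grid so longitudes run from -180.0 to 179.5 instead:
#   lons = np.arange( 0, 360, 0.5 )
#   data = np.random.rand( 10, len(lons) ) # longitude must be the right-most dimension
#   data_shift, lons_shift = shiftgrid( 180., data, lons, start=False )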
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( rst, npixels, crs ):
	'''
	expand the bounds of a raster by a number of pixels in each direction
	rst: rasterio raster object
	npixels: tuple of 4 (left(-), bottom(-), right(+), top(+)) numbers of pixels to
	expand in each direction. for 5 pixels in each direction it would look like
	this: (-5, -5, 5, 5) or just in the right and top directions like this:
	(0, 0, 5, 5).
	crs: epsg code or proj4string defining the geospatial reference
	system (currently unused in this function)
	RETURNS:
	list of the expanded [left, bottom, right, top] bounds
	'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
# new_ext = bounds_to_extent( new_bounds )
return new_bounds
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
	method = one of 'cubic', 'nearest', 'linear' (passed to scipy.interpolate.griddata)
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
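# Example (illustrative only; lon_pts, lat_pts, values stand in for equal-length
# 1-D arrays of point coordinates and values):
#   xi, yi = np.meshgrid( np.arange( -180, 180, 2.0 ), np.arange( -90, 90, 2.0 ) )
#   zi = xyz_to_grid( lon_pts, lat_pts, values, grid=(xi, yi), method='cubic' )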
def run( df, meshgrid_tuple, lons_pcll, template_raster_fn, src_transform, src_crs, src_nodata, output_filename ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
RETURNS:
[str] path to the output filename generated
'''
template_raster = rasterio.open( template_raster_fn )
interp_arr = xyz_to_grid( np.array(df['lon'].tolist()), \
np.array(df['lat'].tolist()), \
np.array(df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.nearest, num_threads=1, SOURCE_EXTRA=1000 )
return write_gtiff( output_arr, template_meta, output_filename, compress=True )
def fn_month_grouper( x ):
'''
take a filename and return the month element of the naming convention
	'''
	import os
	return os.path.splitext(os.path.basename(x))[0].split( '_' )[5]
def downscale_cru_historical( file_list, cru_cl20_arr, output_path, downscaling_operation ):
'''
take a list of cru_historical anomalies filenames, groupby month,
then downscale with the cru_cl20 climatology as a numpy 2d ndarray
that is also on the same grid as the anomalies files.
(intended to be the akcan 1km/2km extent).
operation can be one of 'mult', 'add', 'div' and represents the
	downscaling operation to be used to scale the anomalies on top of the baseline.
this is based on how the anomalies were initially calculated.
RETURNS:
output path location of the new downscaled files.
'''
from functools import partial
def f( anomaly_fn, baseline_arr, output_path ):
def add( cru, anom ):
return cru + anom
def mult( cru, anom ):
return cru * anom
		def div( cru, anom ):
			# this one may not be useful, but the placeholder is here
			raise NotImplementedError( 'division-based downscaling is not implemented' )
cru_ts31 = rasterio.open( anomaly_fn )
meta = cru_ts31.meta
meta.update( compress='lzw' )
cru_ts31 = cru_ts31.read( 1 )
operation_switch = { 'add':add, 'mult':mult, 'div':div }
		downscaled = operation_switch[ downscaling_operation ]( baseline_arr, cru_ts31 )
		# this is hardwired stuff for this fairly hardwired script.
		output_filename = os.path.basename( anomaly_fn ).replace( 'anom', 'downscaled' )
		output_filename = os.path.join( output_path, output_filename )
		output_arr = downscaled # apply the requested downscaling_operation rather than always multiplying
if 'transform' in meta.keys():
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
partial_f = partial( f, baseline_arr=cru_cl20_arr, output_path=output_path )
cru_ts31 = file_list.apply( lambda fn: partial_f( anomaly_fn=fn ) )
return output_path
if __name__ == '__main__':
import rasterio, xray, os, glob, affine
from rasterio.warp import reproject, RESAMPLING
import geopandas as gpd
import pandas as pd
import numpy as np
from collections import OrderedDict
from shapely.geometry import Point
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
parser.add_argument( "-hi", "--cru_ts31", action='store', dest='cru_ts31', type=str, help="path to historical CRU TS3.1 input NetCDF file" )
parser.add_argument( "-ci", "--cl20_path", action='store', dest='cl20_path', type=str, help="path to historical CRU TS2.0 Climatology input directory in single-band GTiff Format" )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="path to ALFRESCO Formatted template raster to match outputs to." )
parser.add_argument( "-base", "--base_path", action='store', dest='base_path', type=str, help="string path to the folder to put the output files into" )
parser.add_argument( "-bt", "--year_begin", action='store', dest='year_begin', type=int, help="string in format YYYY of the beginning year in the series" )
parser.add_argument( "-et", "--year_end", action='store', dest='year_end', type=int, help="string in format YYYY of the ending year in the series" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYY of the beginning year of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYY of the ending year of the climatology period" )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscaling_operation", action='store', dest='downscaling_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string of the abbreviation used to identify the variable (i.e. cld)." )
# parse args
args = parser.parse_args()
# unpack args
ncores = args.ncores
base_path = args.base_path
cru_ts31 = args.cru_ts31
cl20_path = args.cl20_path
template_raster_fn = args.template_raster_fn
anomalies_calc_type = args.anomalies_calc_type
downscaling_operation = args.downscaling_operation
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
year_begin = args.year_begin
year_end = args.year_end
variable = args.variable
metric = args.metric
# make some output directories if they are not there already to dump
# our output files
anomalies_path = os.path.join( base_path, variable, 'anom' )
if not os.path.exists( anomalies_path ):
os.makedirs( anomalies_path )
downscaled_path = os.path.join( base_path, variable, 'downscaled' )
if not os.path.exists( downscaled_path ):
os.makedirs( downscaled_path )
# open with xray
cru_ts31 = xray.open_dataset( cru_ts31 )
# open template raster
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
template_meta.update( crs={'init':'epsg:3338'} )
# make a mask with values of 0=nodata and 1=data
template_raster_mask = template_raster.read_masks( 1 )
template_raster_mask[ template_raster_mask == 255 ] = 1
# calculate the anomalies
	# temporary name change to handle the tmp (tas) variable-naming difference in the CRU file
if variable == 'tas':
variable = 'tmp'
clim_ds = cru_ts31.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
if anomalies_calc_type == 'relative':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) / climatology
if anomalies_calc_type == 'absolute':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) - climatology
# reset the variable if tas
if variable == 'tmp':
variable = 'tas'
	# rotate the anomalies to pacific-centered latlong -- the input anomalies are in greenwich-centered latlong
dat_pcll, lons_pcll = shiftgrid( 0., anomalies, anomalies.lon.data )
# # generate an expanded extent (from the template_raster) to interpolate across
template_raster = rasterio.open( template_raster_fn )
# output_resolution = (1000.0, 1000.0) # hardwired, but we are building this for IEM which requires 1km
template_meta = template_raster.meta
# # interpolate to a new grid
# get longitudes and latitudes using meshgrid
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ] # mesh the lons/lats
# convert into GeoDataFrame and drop all the NaNs
df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in dat_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# meshgrid_tuple = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup
src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
src_crs = {'init':'epsg:4326'}
src_nodata = -9999.0
# output_filenames setup
years = np.arange( year_begin, year_end+1, 1 ).astype( str ).tolist()
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable,metric,'cru_ts31_anom',month,year])+'.tif' )
for month, year in month_year ]
# build a list of keyword args to pass to the pool of workers.
args_list = [ {'df':df, 'meshgrid_tuple':(xi, yi), 'lons_pcll':lons_pcll, \
'template_raster_fn':template_raster_fn, 'src_transform':src_transform, \
'src_crs':src_crs, 'src_nodata':src_nodata, 'output_filename':fn } \
for df, fn in zip( df_list, output_filenames ) ]
# interpolate / reproject / resample the anomalies to match template_raster_fn
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: run( **args ), args_list )
pool.close()
# To Complete the CRU TS3.1 Downscaling we need the following:
# read in the pre-processed CL2.0 Cloud Climatology
	l = sorted( glob.glob( os.path.join( cl20_path, '*.tif' ) ) ) # NOTE: relies on the files sorting into month order (01..12) -- this could catch you.
cl20_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, l ) }
# group the data by months
out = pd.Series( out )
out_months = out.apply( fn_month_grouper )
months_grouped = out.groupby( out_months )
# unpack groups for parallelization and make a list of tuples of arguments to pass to the downscale function
mg = [(i,j) for i,j in months_grouped ]
args_list = [ ( i[1], cl20_dict[i[0]], downscaled_path, downscaling_operation ) for i in mg ]
# downscale / write to disk
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: downscale_cru_historical( *args ), args_list )
pool.close()
# # # # # HOW TO RUN THE APPLICATION # # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.tmp.nc' # cru_ts_3_10.1901.2009.reh.dat.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_v2/cru_ts20/tas/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'absolute' # 'relative'
# downscaling_operation = 'add' # 'mult', 'div'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'tas'
# metric = 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'python cru_ts31_to_cl20_downscaling.py ' + args )
|
mit
|
justincassidy/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
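# NOTE: sklearn.cross_validation was deprecated in scikit-learn 0.18; in newer
# releases KFold and train_test_split live in sklearn.model_selection (with a
# slightly different KFold API).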
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
RPGOne/Skynet
|
numpy-master/numpy/lib/npyio.py
|
2
|
75575
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute access can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
`numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        if b'0x' in x.lower():
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return asbytes
else:
return asstr
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionadded:: 1.11.0
Also when a single column has to be read it is possible to use
        an integer instead of a tuple. E.g. ``usecols = 3`` reads the
        fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
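# A minimal usage sketch (not part of NumPy itself) exercising the header,
# footer, comments and per-column ``fmt`` handling implemented above. The
# output file name 'example_savetxt.txt' is an arbitrary placeholder.
def _example_savetxt_usage():
    data = np.column_stack([np.arange(3), np.linspace(0.0, 1.0, 3)])
    savetxt('example_savetxt.txt', data, fmt=['%d', '%.3f'], delimiter=',',
            header='index,value', footer='end of data', comments='# ')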
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
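# A minimal sketch (not part of NumPy itself) of the single-group branch above:
# with exactly one group in the pattern, the matches are first read with the
# plain field dtype and then re-interpreted as a one-field structured array.
# The file name 'example_fromregex.dat' is an arbitrary placeholder.
def _example_fromregex_single_group():
    with open('example_fromregex.dat', 'w') as f:
        f.write("10 cats\n20 dogs\n")
    # -> array([(10,), (20,)], dtype=[('count', '<i8')])
    return fromregex('example_fromregex.dat', r"(\d+)\s+\w+", [('count', np.int64)])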
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
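# A minimal sketch (not part of NumPy itself) of the missing_values /
# filling_values machinery above, using an in-memory list of byte-string
# lines instead of a real file. The column names are arbitrary.
def _example_genfromtxt_missing():
    lines = [b"1,N/A,3", b"4,5,N/A"]
    # -> e.g. array([(1, -1, 3), (4, 5, -1)]) with integer fields 'a', 'b', 'c'
    return genfromtxt(lines, delimiter=",", names=["a", "b", "c"],
                      missing_values="N/A", filling_values=-1, dtype=int)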
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
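# A minimal sketch (not part of NumPy itself): with the defaults set above,
# recfromcsv takes the first line as (lower-cased) field names and lets
# genfromtxt infer the column dtypes. The field names here are arbitrary.
def _example_recfromcsv_usage():
    lines = [b"Name,Value", b"alpha,1.5", b"beta,2.5"]
    rec = recfromcsv(lines)
    # rec.dtype.names == ('name', 'value'); rec.value is a float column
    return rec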
|
bsd-3-clause
|
mengxn/tensorflow
|
tensorflow/examples/tutorials/input_fn/boston.py
|
51
|
2709
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
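# A minimal sketch (not part of the original tutorial) showing the structure
# input_fn returns: a dict of feature-name -> tf.Tensor plus a label tensor.
# The two-row DataFrame below uses arbitrary placeholder values, not real data.
def _example_input_fn_sketch():
    toy = pd.DataFrame({col: [0.1, 0.2] for col in COLUMNS})
    feature_cols, labels = input_fn(toy)
    return feature_cols, labels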
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
|
apache-2.0
|
potis/ufldl_tutorial
|
display_network.py
|
6
|
3148
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import PIL
# This function visualizes filters in matrix A. Each column of A is a
# filter. We will reshape each column into a square image and visualize
# it on each cell of the visualization panel.
# All other parameters are optional; usually you do not need to worry
# about them.
# opt_normalize: whether we need to normalize the filter so that all of
# them can have similar contrast. Default value is true.
# opt_graycolor: whether we use gray as the heat map. Default is true.
# opt_colmajor: you can switch convention to row major for A. In that
# case, each row of A is a filter. Default value is false.
def display_network(A, filename='weights.png'):
opt_normalize = True
opt_graycolor = True
# Rescale
A = A - np.average(A)
# Compute rows & cols
(row, col) = A.shape
sz = int(np.ceil(np.sqrt(row)))
buf = 1
    # use integer grid sizes so that np.ones and the slicing below get ints
    n = int(np.ceil(np.sqrt(col)))
    m = int(np.ceil(col / float(n)))
    image = np.ones(shape=(buf + m * (sz + buf), buf + n * (sz + buf)))
if not opt_graycolor:
image *= 0.1
k = 0
for i in range(int(m)):
for j in range(int(n)):
if k >= col:
continue
clim = np.max(np.abs(A[:, k]))
if opt_normalize:
image[buf + i * (sz + buf):buf + i * (sz + buf) + sz, buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
A[:, k].reshape(sz, sz) / clim
else:
image[buf + i * (sz + buf):buf + i * (sz + buf) + sz, buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
A[:, k].reshape(sz, sz) / np.max(np.abs(A))
k += 1
plt.imsave(filename, image, cmap=matplotlib.cm.gray)
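# A minimal usage sketch (not part of the original tutorial code): 64 random
# 8x8 "filters" (one per column) are tiled into a grid and written to an
# image file. The file name 'example_weights.png' is an arbitrary placeholder.
def _example_display_network():
    A = np.random.randn(64, 64)  # 64 filters of 8x8 = 64 pixels each
    display_network(A, filename='example_weights.png')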
def display_color_network(A, filename='weights.png'):
"""
# display receptive field(s) or basis vector(s) for image patches
#
    # A: the basis, with patches as column vectors
    # In case the midpoint is not set at 0, we shift it dynamically
    :param A:
    :param filename:
    :return:
"""
if np.min(A) >= 0:
A = A - np.mean(A)
    # use integer sizes so that the slicing and reshape below work on Python 3
    cols = int(np.round(np.sqrt(A.shape[1])))
    channel_size = A.shape[0] // 3
    dim = int(np.sqrt(channel_size))
    dimp = dim + 1
    rows = int(np.ceil(A.shape[1] / float(cols)))
    B = A[0:channel_size, :]
    C = A[channel_size:2 * channel_size, :]
    D = A[2 * channel_size:3 * channel_size, :]
    B = B / np.max(np.abs(B))
    C = C / np.max(np.abs(C))
    D = D / np.max(np.abs(D))
    # Initialization of the image
    image = np.ones(shape=(dim * rows + rows - 1, dim * cols + cols - 1, 3))
for i in range(int(rows)):
for j in range(int(cols)):
# This sets the patch
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 0] = B[:, i * cols + j].reshape(dim, dim)
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 1] = C[:, i * cols + j].reshape(dim, dim)
image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 2] = D[:, i * cols + j].reshape(dim, dim)
image = (image + 1) / 2
PIL.Image.fromarray(np.uint8(image * 255), 'RGB').save(filename)
return 0
|
mit
|
jni/networkx
|
examples/drawing/labels_and_colors.py
|
44
|
1330
|
#!/usr/bin/env python
"""
Draw a graph with matplotlib, color by degree.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg ([email protected])"""
import matplotlib.pyplot as plt
import networkx as nx
G=nx.cubical_graph()
pos=nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,
nodelist=[0,1,2,3],
node_color='r',
node_size=500,
alpha=0.8)
nx.draw_networkx_nodes(G,pos,
nodelist=[4,5,6,7],
node_color='b',
node_size=500,
alpha=0.8)
# edges
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
nx.draw_networkx_edges(G,pos,
edgelist=[(0,1),(1,2),(2,3),(3,0)],
width=8,alpha=0.5,edge_color='r')
nx.draw_networkx_edges(G,pos,
edgelist=[(4,5),(5,6),(6,7),(7,4)],
width=8,alpha=0.5,edge_color='b')
# some math labels
labels={}
labels[0]=r'$a$'
labels[1]=r'$b$'
labels[2]=r'$c$'
labels[3]=r'$d$'
labels[4]=r'$\alpha$'
labels[5]=r'$\beta$'
labels[6]=r'$\gamma$'
labels[7]=r'$\delta$'
nx.draw_networkx_labels(G,pos,labels,font_size=16)
plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
|
bsd-3-clause
|
jopohl/urh
|
data/pyinstaller_helper.py
|
1
|
2642
|
import os
import shutil
import sys
HIDDEN_IMPORTS = ["packaging.specifiers", "packaging.requirements", "pkg_resources.py2_warn",
"numpy.core._methods", "numpy.core._dtype_ctypes",
"numpy.random.common", "numpy.random.entropy", "numpy.random.bounded_integers"]
DATA = [("src/urh/dev/native/lib/shared", "."), ("src/urh/plugins", "urh/plugins"), ]
EXCLUDE = ["matplotlib"]
def run_pyinstaller(cmd_list: list, env: list=None):
cmd = " ".join(cmd_list)
print(cmd, flush=True)
env = [] if env is None else env
if env:
os.system(" ".join(env) + " " + cmd)
else:
os.system(cmd)
if __name__ == '__main__':
cmd = ["pyinstaller", "--clean"]
if sys.platform == "darwin":
cmd.append("--onefile")
for hidden_import in HIDDEN_IMPORTS:
cmd.append("--hidden-import={}".format(hidden_import))
for src, dst in DATA:
cmd.append("--add-data")
cmd.append('"{}{}{}"'.format(src, os.pathsep, dst))
for exclude in EXCLUDE:
cmd.append("--exclude-module={}".format(exclude))
urh_path = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
if sys.platform == "darwin":
cmd.append('--icon="{}"'.format(os.path.join(urh_path, "data/icons/appicon.icns")))
else:
cmd.append('--icon="{}"'.format(os.path.join(urh_path, "data/icons/appicon.ico")))
cmd.extend(["--distpath", "./pyinstaller"])
urh_cmd = cmd + ["--name=urh", "--windowed", "--workpath", "./urh_build",
os.path.join(urh_path, "src/urh/main.py")]
urh_debug_cmd = cmd + ["--name=urh_debug", "--workpath", "./urh_debug_build",
os.path.join(urh_path, "src/urh/main.py")]
cli_cmd = cmd + ["--workpath", "./urh_cli_build",
os.path.join(urh_path, "src/urh/cli/urh_cli.py")]
os.makedirs("./pyinstaller")
if sys.platform == "darwin":
run_pyinstaller(urh_cmd, env=["DYLD_LIBRARY_PATH=src/urh/dev/native/lib/shared"])
import plistlib
with open("pyinstaller/urh.app/Contents/Info.plist", "rb") as f:
p = plistlib.load(f)
p["NSHighResolutionCapable"] = True
p["NSRequiresAquaSystemAppearance"] = True
with open("pyinstaller/urh.app/Contents/Info.plist", "wb") as f:
plistlib.dump(p, f)
else:
for cmd in [urh_cmd, cli_cmd, urh_debug_cmd]:
run_pyinstaller(cmd)
shutil.copy("./pyinstaller/urh_cli/urh_cli.exe", "./pyinstaller/urh/urh_cli.exe")
shutil.copy("./pyinstaller/urh_debug/urh_debug.exe", "./pyinstaller/urh/urh_debug.exe")
|
gpl-3.0
|
mhdella/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
222
|
10500
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
tmills/neural-assertion
|
scripts/keras/multitask/assertion_multitask_optimize.py
|
1
|
7074
|
#!/usr/bin/env python
## Common python modules:
import os.path
import pickle
import random
import sys
## library imports:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import sklearn as sk
import sklearn.cross_validation
from sklearn.cross_validation import train_test_split
from zipfile import ZipFile
## Local imports:
from random_search import RandomSearch
import cleartk_io as ctk_io
import nn_models
batch_size = (64, 128, 256, 512)
#filters = ((128,), (256,), (512,), (1024,))
filters = ((128,),)
#layers = ((64,), (128,), (256,), (512,), (1024,))
layers = ((64,), (128,))
embed_dim = (10, 25, 50, 100, 200)
#widths = ((2,3,), (3,4), (2,3,4), (3,4,5), (2,3,4,5))
widths = ((2,3,),)
distances = (True, False)
start_symbol = "<e>"
end_symbol = "</e>"
def get_random_config(weights=None):
config = {}
config['distances'] = random.choice(distances)
config['batch_size'] = random.choice(batch_size)
config['filters'] = random.choice(filters)
config['layers'] = random.choice(layers)
config['embed_dim'] = random.choice(embed_dim)
config['widths'] = random.choice(widths)
return config
def run_one_eval(epochs, config, train_x, train_y, valid_x, valid_y, vocab_size, output_dims_list, weights):
print("Running with config: %s" % (config) )
np.random.seed(1337)
stopper = nn_models.get_early_stopper()
model = nn_models.get_multitask_cnn(train_x.shape, vocab_size, output_dims_list, conv_layers=config['filters'], fc_layers=config['layers'], embed_dim=config['embed_dim'], filter_widths=config['widths'])
history = model.fit(train_x, train_y,
nb_epoch=max(2, epochs),
batch_size=config['batch_size'],
verbose=1,
validation_data=(valid_x, valid_y),
callbacks=[stopper])
pred_y = model.predict(valid_x)
tp = [calc_tp(valid_y[i], pred_y[i], output_dims_list[i]) for i in range(len(pred_y))]
fp = [calc_fp(valid_y[i], pred_y[i], output_dims_list[i]) for i in range(len(pred_y))]
fn = [calc_fn(valid_y[i], pred_y[i], output_dims_list[i]) for i in range(len(pred_y))]
if sum(tp) > 0:
print("tp = %s" % tp)
if sum(fp) > 0:
print("fp = %s" % fp)
if sum(fn) > 0:
print("fn = %s" % fn)
recalls = [0 if tp[i] == 0 else float(tp[i]) / (tp[i] + fn[i]) for i in range(len(pred_y))]
precision = [0 if tp[i] == 0 else float(tp[i]) / (tp[i] + fp[i]) for i in range(len(pred_y))]
f1 = [calc_f1(recalls[i], precision[i]) for i in range(len(pred_y))]
loss = 1 - np.mean(f1)
print("Returning loss: %f" % (loss) )
#loss = history.history['val_loss'][-1]
return loss
def dim2index(dim):
#return 1 if dim == 2 else dim-1
return 1
def calc_f1(recall, precision):
if recall == 0.0 or precision == 0.0:
return 0
else:
return 2 * recall * precision / (recall + precision)
def calc_tp(gold_y, pred_y, dims):
''' Get the index of all the positives in the gold vector, use that as an index into the predicted vector
and count the number of times that value is > 0.5
'''
if dims == 1:
return len(np.where(pred_y[np.where(gold_y > 0.5)] > 0.5)[0])
else:
return float(len(np.where(pred_y.argmax(dim2index(dims))[np.where(gold_y.argmax(dim2index(dims)) > 0)] > 0)[0]))
def calc_fp(gold_y, pred_y, dims):
if dims == 1:
return len(np.where(pred_y[np.where(gold_y < 0.5)] > 0.5)[0])
else:
return float(len(np.where(pred_y.argmax(dim2index(dims))[np.where(gold_y.argmax(dim2index(dims)) == 0)] > 0)[0]))
def calc_fn(gold_y, pred_y, dims):
if dims == 1:
return len(np.where(pred_y[np.where(gold_y > 0.5)] < 0.5)[0])
else:
return float(len(np.where(pred_y.argmax(dim2index(dims))[np.where(gold_y.argmax(dim2index(dims)) > 0)] == 0)[0]))
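# A tiny sanity-check sketch (not part of the original script) for the binary
# (dims == 1) branch of the metrics above, on made-up gold/predicted values.
def _example_binary_metrics():
    gold = np.array([1.0, 0.0, 1.0, 0.0])
    pred = np.array([0.9, 0.8, 0.2, 0.1])
    tp = calc_tp(gold, pred, 1)  # 1 true positive
    fp = calc_fp(gold, pred, 1)  # 1 false positive
    fn = calc_fn(gold, pred, 1)  # 1 false negative
    # recall = precision = 0.5, so f1 = 0.5
    return calc_f1(float(tp) / (tp + fn), float(tp) / (tp + fp))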
def main(args):
if len(args) < 1:
sys.stderr.write("Error - one required argument: <data directory>\n")
sys.exit(-1)
working_dir = args[0]
print("Reading data...")
Y, outcome_map, outcome_list, X, feature_alphabet = ctk_io.read_multitask_token_sequence_data(working_dir)
start_ind = feature_alphabet[start_symbol]
end_ind = feature_alphabet[end_symbol]
train_x, valid_x, train_y, valid_y = train_test_split(X, Y, test_size=0.2, random_state=7)
# X_distance = get_distance_features(X, start_ind, end_ind)
print("Shape of X is %s and Y is %s" % (str(X.shape), str(Y.shape)))
num_examples, dimension = X.shape
num_y_examples, num_labels = Y.shape
assert num_examples == num_y_examples
weights = None
if len(args) > 1:
        weights = ctk_io.read_embeddings(args[1], feature_alphabet)
train_y_adj, train_indices = ctk_io.flatten_outputs(train_y)
valid_y_adj, valid_indices = ctk_io.flatten_outputs(valid_y)
if not train_indices == valid_indices:
print("Error: training and valid sets have different index sets -- may be missing some labels in one set or the other")
sys.exit(-1)
output_dims_list = []
train_y_list = []
valid_y_list = []
indices = train_indices
for i in range(len(indices)-1):
label_dims = indices[i+1] - indices[i]
output_dims_list.append(label_dims)
if label_dims == 1:
train_y_list.append(train_y_adj[:, indices[i]])
valid_y_list.append(valid_y_adj[:, indices[i]])
else:
train_y_list.append(train_y_adj[:, indices[i]:indices[i+1]])
valid_y_list.append(valid_y_adj[:, indices[i]:indices[i+1]])
print("Dimensions of label %d are %s" % (i, str(train_y_list[-1].shape) ) )
## pass a function to the search that it uses to get a random config
## and a function that it will get an eval given (e)pochs and (c)onfig file:
optim = RandomSearch(lambda: get_random_config(weights), lambda e, c: run_one_eval(e, c, train_x, train_y_list, valid_x, valid_y_list, len(feature_alphabet), output_dims_list, weights ) )
best_config = optim.optimize(max_iter=27)
open(os.path.join(working_dir, 'model_0.config'), 'w').write( str(best_config) )
print("Best config returned by optimizer is %s" % str(best_config) )
def get_distance_features(X, start_symbol, end_symbol):
dist = np.zeros_like(X)
dist = np.expand_dims(dist, 2)
other_dim = X.shape[1]
for row_ind in range(X.shape[0]):
left_ind = np.where(X[row_ind] == start_symbol)[0][0]
right_ind = np.where(X[row_ind] == end_symbol)[0][0]
dist[row_ind, 0:left_ind, 0] += (np.arange(-left_ind, 0) / other_dim)
dist[row_ind, right_ind+1:, 0] += (np.arange(1, other_dim-right_ind) / other_dim)
return dist
if __name__ == "__main__":
main(sys.argv[1:])
|
apache-2.0
|
jreback/pandas
|
pandas/core/dtypes/cast.py
|
1
|
58216
|
"""
Routines for casting.
"""
from __future__ import annotations
from contextlib import suppress
from datetime import datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing, tslib
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import DatetimeArray, ExtensionArray
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
    This may not necessarily be performant.
"""
return bool(
isinstance(obj, ABCSeries)
and is_object_dtype(obj.dtype)
and any(isinstance(v, ABCSeries) for v in obj._values)
)
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = Timedelta(value)
return value
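# A minimal sketch (not part of pandas itself) of the boxing rule above:
# datetime-like scalars are wrapped in Timestamp/Timedelta unless the target
# dtype is object, in which case they are passed through unchanged.
def _example_maybe_box_datetimelike():
    boxed = maybe_box_datetimelike(np.datetime64("2020-01-01"))  # -> Timestamp
    passed_through = maybe_box_datetimelike(np.datetime64("2020-01-01"), dtype=object)
    return boxed, passed_through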
def maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
"""
Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
into a numpy array. Failing to unbox would risk dropping nanoseconds.
Notes
-----
Caller is responsible for checking dtype.kind in ["m", "M"]
"""
if is_valid_nat_for_dtype(value, dtype):
# GH#36541: can't fill array directly with pd.NaT
        # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
# ValueError: cannot convert float NaN to integer
value = dtype.type("NaT", "ns")
elif isinstance(value, Timestamp):
if value.tz is None:
value = value.to_datetime64()
elif isinstance(value, Timedelta):
value = value.to_timedelta64()
_disallow_mismatched_datetimelike(value, dtype)
return value
def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
"""
numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
vice-versa, but we do not want to allow this, so we need to
check explicitly
"""
vdtype = getattr(value, "dtype", None)
if vdtype is None:
return
elif (vdtype.kind == "m" and dtype.kind == "M") or (
vdtype.kind == "M" and dtype.kind == "m"
):
raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
    try to cast to the specified dtype (e.g. convert back to bool/int
    or could be an astype of float64->float32)
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
            # bytes-like object or a number, not 'Period'
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if isinstance(dtype, DatetimeTZDtype):
# convert to datetime and change timezone
i8values = result.astype("i8", copy=False)
cls = dtype.construct_array_type()
# equiv: DatetimeArray(i8values).tz_localize("UTC").tz_convert(dtype.tz)
result = cls._simple_new(i8values, dtype=dtype)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
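# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical. Downcasting only happens when it is lossless.
def _illustrate_maybe_downcast_numeric():
    exact = maybe_downcast_numeric(np.array([1.0, 2.0]), np.dtype(np.int64))
    assert exact.dtype == np.dtype(np.int64)
    # A lossy cast returns the input unchanged.
    lossy = maybe_downcast_numeric(np.array([1.5, 2.0]), np.dtype(np.int64))
    assert lossy.dtype == np.dtype(np.float64)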
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result possibly cast to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.floating import Float64Dtype
from pandas.core.arrays.integer import Int64Dtype, _IntegerDtype
if how in ["add", "cumsum", "sum", "prod"]:
if dtype == np.dtype(bool):
return np.dtype(np.int64)
elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):
return Int64Dtype()
elif how in ["mean", "median", "var"] and isinstance(
dtype, (BooleanDtype, _IntegerDtype)
):
return Float64Dtype()
return dtype
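# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical. Summing booleans should report int64.
def _illustrate_maybe_cast_result_dtype():
    assert maybe_cast_result_dtype(np.dtype(bool), "sum") == np.dtype(np.int64)
    # dtypes not covered by the special cases pass through unchanged
    assert maybe_cast_result_dtype(np.dtype(np.float64), "mean") == np.dtype(np.float64)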
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""
A safe version of putmask that potentially upcasts the result.
Masked positions in `result` are filled with np.nan, upcasting
`result` first if its dtype cannot hold NaN.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
Returns
-------
result : ndarray
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result = maybe_upcast_putmask(arr, mask)
>>> result
array([ 1., nan, 3., nan, nan])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
# NB: we never get here with result.dtype.kind in ["m", "M"]
if mask.any():
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, np.nan)
if new_dtype != result.dtype:
result = result.astype(new_dtype, copy=True)
np.place(result, mask, np.nan)
return result
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
Raises
------
ValueError
If fill_value is a non-scalar and dtype is not object.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
elif is_valid_nat_for_dtype(fill_value, dtype):
# e.g. pd.NA, which is not accepted by Timestamp constructor
fill_value = np.datetime64("NaT", "ns")
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
elif is_valid_nat_for_dtype(fill_value, dtype):
# e.g pd.NA, which is not accepted by the Timedelta constructor
fill_value = np.timedelta64("NaT", "ns")
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None or fill_value is libmissing.NA:
# Note: we already excluded dt64/td64 dtypes above
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
if fill_value is not libmissing.NA:
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
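# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_maybe_promote():
    # Filling an int64 array with NaN requires upcasting to float64.
    dtype, fill_value = maybe_promote(np.dtype(np.int64), np.nan)
    assert dtype == np.dtype(np.float64) and np.isnan(fill_value)
    # A tz-aware fill value cannot be held by naive datetime64[ns].
    dtype, _ = maybe_promote(np.dtype("M8[ns]"), Timestamp("2021-01-01", tz="UTC"))
    assert dtype == np.dtype(object)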
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array that belongs to a pandas extension type is
inferred as object
"""
if not is_list_like(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar that belongs to a pandas extension type is
inferred as object
"""
dtype: DtypeObj = np.dtype(object)
# a 0-dimensional (scalar) ndarray
if isinstance(val, np.ndarray):
if val.ndim != 0:
msg = "invalid ndarray passed to infer_dtype_from_scalar"
raise ValueError(msg)
dtype = val.dtype
val = lib.item_from_zerodim(val)
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternatively we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!)
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
try:
val = Timestamp(val)
except OutOfBoundsDatetime:
return np.dtype(object), val
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
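# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_infer_dtype_from_scalar():
    dtype, val = infer_dtype_from_scalar(1)
    assert dtype == np.dtype(np.int64) and val == 1
    # With pandas_dtype=True, a tz-aware Timestamp maps to DatetimeTZDtype.
    dtype, _ = infer_dtype_from_scalar(Timestamp("2021-01-01", tz="UTC"), pandas_dtype=True)
    assert isinstance(dtype, DatetimeTZDtype)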
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array that belongs to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware / categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
raise TypeError("'arr' must be list-like")
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: np.ndarray,
fill_value: Scalar = np.nan,
copy: bool = False,
) -> Tuple[np.ndarray, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : np.ndarray
The array that we may want to upcast.
fill_value : what we want to fill with
copy : bool, default False
If True, always make a copy even if no upcast is required.
Returns
-------
values: np.ndarray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
new_dtype, fill_value = maybe_promote(values.dtype, fill_value)
# We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)
values = values.astype(new_dtype, copy=copy)
return values, fill_value
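# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_maybe_upcast():
    # Upcasting an int64 array so it can hold NaN yields float64.
    values, fill_value = maybe_upcast(np.array([1, 2, 3]), fill_value=np.nan)
    assert values.dtype == np.dtype(np.float64) and np.isnan(fill_value)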
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
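# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_coerce_indexer_dtype():
    # A handful of categories means the codes fit in int8.
    codes = coerce_indexer_dtype(np.array([0, 1, 2, 0]), ["a", "b", "c"])
    assert codes.dtype == np.dtype(np.int8)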
def astype_dt64_to_dt64tz(
values: ArrayLike, dtype: DtypeObj, copy: bool, via_utc: bool = False
) -> "DatetimeArray":
# GH#33401 we have inconsistent behaviors between
# Datetimeindex[naive].astype(tzaware)
# Series[dt64].astype(tzaware)
# This collects them in one place to prevent further fragmentation.
from pandas.core.construction import ensure_wrapped_if_datetimelike
values = ensure_wrapped_if_datetimelike(values)
values = cast("DatetimeArray", values)
aware = isinstance(dtype, DatetimeTZDtype)
if via_utc:
# Series.astype behavior
assert values.tz is None and aware # caller is responsible for checking this
dtype = cast(DatetimeTZDtype, dtype)
if copy:
# this should be the only copy
values = values.copy()
# FIXME: GH#33401 this doesn't match DatetimeArray.astype, which
# goes through the `not via_utc` path
return values.tz_localize("UTC").tz_convert(dtype.tz)
else:
# DatetimeArray/DatetimeIndex.astype behavior
if values.tz is None and aware:
dtype = cast(DatetimeTZDtype, dtype)
return values.tz_localize(dtype.tz)
elif aware:
# GH#18951: datetime64_tz dtype but not equal means different tz
dtype = cast(DatetimeTZDtype, dtype)
result = values.tz_convert(dtype.tz)
if copy:
result = result.copy()
return result
elif values.tz is not None:
result = values.tz_convert("UTC").tz_localize(None)
if copy:
result = result.copy()
return result
raise NotImplementedError("dtype_equal case should be handled elsewhere")
def astype_td64_unit_conversion(
values: np.ndarray, dtype: np.dtype, copy: bool
) -> np.ndarray:
"""
By pandas convention, converting to a non-nano timedelta64 unit
returns a float64-dtyped array whose values are multiples of the
desired timedelta unit (essentially division), with NaT mapped to NaN.
Parameters
----------
values : np.ndarray[timedelta64[ns]]
dtype : np.dtype
timedelta64 with unit not-necessarily nano
copy : bool
Returns
-------
np.ndarray
"""
if is_dtype_equal(values.dtype, dtype):
if copy:
return values.copy()
return values
# otherwise we are converting to non-nano
result = values.astype(dtype, copy=False) # avoid double-copying
result = result.astype(np.float64)
mask = isna(values)
np.putmask(result, mask, np.nan)
return result
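# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_astype_td64_unit_conversion():
    values = np.array([np.timedelta64(2, "s"), np.timedelta64("NaT")], dtype="m8[ns]")
    result = astype_td64_unit_conversion(values, np.dtype("m8[s]"), copy=False)
    # multiples of the requested unit, with NaT mapped to NaN
    assert result.dtype == np.dtype(np.float64)
    assert result[0] == 2.0 and np.isnan(result[1])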
def astype_nansafe(
arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
# Make sure we are doing non-copy ravel and reshape.
flags = arr.flags
flat = arr.ravel("K")
result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)
order = "F" if flags.f_contiguous else "C"
return result.reshape(arr.shape, order=order)
# We get here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if isinstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
elif not isinstance(dtype, np.dtype):
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == object
):
from pandas.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.astype(dtype, copy=copy)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr):
if dtype == np.int64:
warnings.warn(
f"casting {arr.dtype} values to int64 with .astype(...) "
"is deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
warnings.warn(
f"casting {arr.dtype} values to int64 with .astype(...) "
"is deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return astype_td64_unit_conversion(arr, dtype, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr, dtype)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr.dtype) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.astype(dtype, copy=copy)
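# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_astype_nansafe():
    # Finite floats cast cleanly to int64; a NaN in the data would instead
    # raise ValueError("Cannot convert non-finite values (NA or inf) to integer").
    result = astype_nansafe(np.array([1.0, 2.0]), np.dtype(np.int64))
    assert result.dtype == np.dtype(np.int64) and (result == [1, 2]).all()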
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime or timedelta:
# GH 20380: the datetime may be beyond year 2262, hence outside the
# bounds of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(
values, convert_datetime=datetime, convert_timedelta=timedelta
)
except OutOfBoundsDatetime:
return values
if numeric and is_object_dtype(values.dtype):
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
# If all NaN, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
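# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_soft_convert_objects():
    # Numeric strings held in an object array are coerced to float64.
    values = np.array(["1.5", "2.5"], dtype=object)
    converted = soft_convert_objects(values, datetime=False, timedelta=False)
    assert converted.dtype == np.dtype(np.float64)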
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtype()``.
convert_floating : bool, default True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_floating:
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
input_array.dtype.name, "Float64"
)
# if we could also convert to integer, check if all floats
# are actually integers
if convert_integer:
arr = input_array[notna(input_array)]
if (arr.astype(int) == arr).all():
inferred_dtype = "Int64"
else:
inferred_dtype = inferred_float_dtype
else:
inferred_dtype = inferred_float_dtype
else:
if is_float_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
else:
if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
inferred_dtype = input_array.dtype
else:
inferred_dtype = input_array.dtype
return inferred_dtype
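# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_convert_dtypes():
    # A plain int64 ndarray maps to the nullable "Int64" extension dtype.
    dtype = convert_dtypes(np.array([1, 2, 3]))
    assert str(dtype) == "Int64"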
def maybe_castable(arr: np.ndarray) -> bool:
# return False to force a non-fastpath
assert isinstance(arr, np.ndarray) # GH 37024
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == "M":
return is_datetime64_ns_dtype(arr.dtype)
elif kind == "m":
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(
value: Union[ArrayLike, Scalar], convert_dates: bool = False
):
"""
we might have an array (or a single object) that is datetime-like
and no dtype was passed; don't change the value unless we find a
datetime/timedelta set.
This is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls / string-likes.
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : bool, default False
if True, try really hard to convert dates (such as datetime.date);
otherwise leave the inferred dtype 'date' alone
"""
if isinstance(value, (ABCIndex, ABCExtensionArray)):
if not is_object_dtype(value.dtype):
raise ValueError("array-like value must be object-dtype")
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if v.ndim != 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
# tznaive only
v = tslib.array_to_datetime(v, require_iso8601=True, errors="raise")[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas import DatetimeIndex
try:
values, tz = conversion.datetime_to_datetime64(v)
except (ValueError, TypeError):
pass
else:
return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz)
except TypeError:
# e.g. <class 'numpy.timedelta64'> is not convertible to datetime
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
td_values = to_timedelta(v)
except ValueError:
return v.reshape(shape)
else:
return np.asarray(td_values).reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == "date" and convert_dates:
value = try_datetime(v)
elif inferred_type == "datetime":
value = try_datetime(v)
elif inferred_type == "timedelta":
value = try_timedelta(v)
elif inferred_type == "nat":
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ["mixed"]:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value
def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):
"""
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
if not is_list_like(value):
raise TypeError("value must be listlike")
if dtype is not None:
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = (
f"The '{dtype.name}' dtype has no unit. "
f"Please pass in '{dtype.name}[ns]' instead."
)
if is_datetime64:
# unpack e.g. SparseDtype
dtype = getattr(dtype, "subtype", dtype)
if not is_dtype_equal(dtype, DT64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg)
dtype = DT64NS_DTYPE
else:
raise TypeError(
f"cannot convert datetimelike to dtype [{dtype}]"
)
elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg)
dtype = TD64NS_DTYPE
else:
raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]")
if not is_sparse(value):
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
_disallow_mismatched_datetimelike(value, dtype)
try:
if is_datetime64:
value = to_datetime(value, errors="raise")
# GH 25843: Remove tz information since the dtype
# didn't specify one
if value.tz is not None:
value = value.tz_localize(None)
value = value._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value.dtype)
value = to_datetime(value, errors="raise").array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = value.tz_localize("UTC").tz_convert(dtype.tz)
elif is_timedelta64:
value = to_timedelta(value, errors="raise")._values
except OutOfBoundsDatetime:
raise
except (ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(
getattr(value, "dtype", None)
) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != DT64NS_DTYPE:
value = value.astype(DT64NS_DTYPE)
ints = np.asarray(value).view("i8")
return ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError(f"Cannot cast datetime64 to {dtype}")
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ["M", "m"]:
value = sanitize_to_nanoseconds(value)
# only run datetimelike inference if we don't already have an array
# whose dtype is set up as something other than integer/object; in
# that case, don't bother with this conversion
elif not (
is_array
and not (
issubclass(value.dtype.type, np.integer) or value.dtype == np.object_
)
):
value = maybe_infer_to_datetimelike(value)
return value
def sanitize_to_nanoseconds(values: np.ndarray) -> np.ndarray:
"""
Safely convert non-nanosecond datetime64 or timedelta64 values to nanosecond.
"""
dtype = values.dtype
if dtype.kind == "M" and dtype != DT64NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
values = conversion.ensure_timedelta64ns(values)
return values
def find_common_type(types: List[DtypeObj]) -> DtypeObj:
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if not types:
raise ValueError("no types given")
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
# get unique types (dict.fromkeys is used as order-preserving set())
types = list(dict.fromkeys(types).keys())
if any(isinstance(t, ExtensionDtype) for t in types):
for t in types:
if isinstance(t, ExtensionDtype):
res = t._get_common_dtype(types)
if res is not None:
return res
return np.dtype("object")
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype("datetime64[ns]")
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype("timedelta64[ns]")
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.dtype("object")
return np.find_common_type(types, [])
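# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_find_common_type():
    # int64 and float64 promote to float64 ...
    assert find_common_type([np.dtype(np.int64), np.dtype(np.float64)]) == np.dtype(np.float64)
    # ... but bool never mixes with numeric dtypes.
    assert find_common_type([np.dtype(bool), np.dtype(np.int64)]) == np.dtype(object)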
def construct_2d_arraylike_from_scalar(
value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
) -> np.ndarray:
if dtype.kind in ["m", "M"]:
value = maybe_unbox_datetimelike(value, dtype)
# Attempt to coerce to a numpy array
try:
arr = np.array(value, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
raise TypeError(
f"DataFrame constructor called with incompatible data and dtype: {err}"
) from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
shape = (length, width)
return np.full(shape, arr)
def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: Optional[DtypeObj]
) -> ArrayLike:
"""
create an np.ndarray / pandas type of the specified length and dtype,
filled with the given value
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if dtype is None:
try:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
except OutOfBoundsDatetime:
dtype = np.dtype(object)
if is_extension_array_dtype(dtype):
cls = dtype.construct_array_type()
subarr = cls._from_sequence([value] * length, dtype=dtype)
else:
if length and is_integer_dtype(dtype) and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype("float64")
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
# we need to coerce to object dtype so that numpy does not
# truncate our string when taking it as a scalar value
dtype = np.dtype("object")
if not isna(value):
value = ensure_str(value)
elif dtype.kind in ["M", "m"]:
value = maybe_unbox_datetimelike(value, dtype)
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
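# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_construct_1d_arraylike_from_scalar():
    # Filling an integer-dtyped request with NaN silently coerces to float64.
    arr = construct_1d_arraylike_from_scalar(np.nan, 3, np.dtype(np.int64))
    assert arr.dtype == np.dtype(np.float64) and isna(arr).all()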
def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:
"""
Transform any list-like object in a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype="object")
result[:] = values
return result
def construct_1d_ndarray_preserving_na(
values: Sequence, dtype: Optional[DtypeObj] = None, copy: bool = False
) -> np.ndarray:
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str'))
array(['1.0', '2.0', None], dtype=object)
"""
if dtype is not None and dtype.kind == "U":
subarr = lib.ensure_string_array(values, convert_na_value=False, copy=copy)
else:
if dtype is not None:
_disallow_mismatched_datetimelike(values, dtype)
subarr = np.array(values, dtype=dtype, copy=copy)
return subarr
def maybe_cast_to_integer_array(arr, dtype: Dtype, copy: bool = False):
"""
Takes any dtype and returns the casted version, raising for when data is
incompatible with integer/unsigned integer dtypes.
.. versionadded:: 0.24.0
Parameters
----------
arr : array-like
The array to cast.
dtype : str, np.dtype
The integer dtype to cast the array to.
copy: bool, default False
Whether to make a copy of the array before returning.
Returns
-------
ndarray
Array of integer or unsigned integer dtype.
Raises
------
OverflowError : the dtype is incompatible with the data
ValueError : loss of precision has occurred during casting
Examples
--------
If you try to coerce negative values to unsigned integers, it raises:
>>> pd.Series([-1], dtype="uint64")
Traceback (most recent call last):
...
OverflowError: Trying to coerce negative values to unsigned integers
Also, if you try to coerce float values to integers, it raises:
>>> pd.Series([1, 2, 3.5], dtype="int64")
Traceback (most recent call last):
...
ValueError: Trying to coerce float values to integers
"""
assert is_integer_dtype(dtype)
try:
if not hasattr(arr, "astype"):
casted = np.array(arr, dtype=dtype, copy=copy)
else:
casted = arr.astype(dtype, copy=copy)
except OverflowError as err:
raise OverflowError(
"The elements provided in the data cannot all be "
f"casted to the dtype {dtype}"
) from err
if np.array_equal(arr, casted):
return casted
# We do this casting to allow for proper
# data and dtype checking.
#
# We didn't do this earlier because NumPy
# doesn't handle `uint64` correctly.
arr = np.asarray(arr)
if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
raise OverflowError("Trying to coerce negative values to unsigned integers")
if is_float_dtype(arr) or is_object_dtype(arr):
raise ValueError("Trying to coerce float values to integers")
def convert_scalar_for_putitemlike(scalar: Scalar, dtype: np.dtype) -> Scalar:
"""
Convert datetimelike scalar if we are setting into a datetime64
or timedelta64 ndarray.
Parameters
----------
scalar : scalar
dtype : np.dtype
Returns
-------
scalar
"""
if dtype.kind in ["m", "M"]:
scalar = maybe_box_datetimelike(scalar, dtype)
return maybe_unbox_datetimelike(scalar, dtype)
else:
validate_numeric_casting(dtype, scalar)
return scalar
def validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None:
"""
Check that we can losslessly insert the given value into an array
with the given dtype.
Parameters
----------
dtype : np.dtype
value : scalar
Raises
------
ValueError
"""
if (
issubclass(dtype.type, (np.integer, np.bool_))
and is_float(value)
and np.isnan(value)
):
raise ValueError("Cannot assign nan to integer series")
if (
issubclass(dtype.type, (np.integer, np.floating, complex))
and not issubclass(dtype.type, np.bool_)
and is_bool(value)
):
raise ValueError("Cannot assign bool to float/integer series")
|
bsd-3-clause
|
codefordc/ERDA
|
python/buildcsv.py
|
1
|
3223
|
#! /usr/bin/env python
# encoding: utf-8
'''
Converts and merges multiple .xlsx Excel files containing DC emergency response
data into a single .csv file.
'''
import sys
import glob
from collections import defaultdict, OrderedDict
import pandas as pd
import csv
def address_quadrant(address):
'''Parses out the quadrant from an address string, if it contains one.'''
quad = address.strip().split(' ')
if 'NW' in quad:
return 'NW'
elif 'NE' in quad:
return "NE"
elif 'SW' in quad:
return 'SW'
elif 'SE' in quad:
return 'SE'
else:
return 'NA'
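# Illustrative check added for exposition (not part of the original script);
# the helper name is hypothetical.
def _illustrate_address_quadrant():
    assert address_quadrant('1600 PENNSYLVANIA AVE NW') == 'NW'
    # addresses without a quadrant token fall back to 'NA'
    assert address_quadrant('123 MAIN ST') == 'NA'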
def normalize_address(address):
return address.strip().upper()
def print_summary(num_valid, errors):
num_no_address = sum(e[0] == 'no address' for e in errors)
num_blank_address = sum(e[0] == 'blank address' for e in errors)
print(num_valid, 'valid incident(s) processed.')
print(len(errors), 'incident(s) were excluded:')
print(num_no_address, 'incident(s) had an empty address field.')
print(num_blank_address, 'incident(s) had an address of "BLANK".')
def convert_to_csv(data_path, incidents):
csv_rows = [['Date', 'Dispatch Time', 'Address', 'Quadrant', 'Response Time', 'Unit']]
errors = []
addresses = defaultdict(int)
i = 0
for index, incident in incidents.iterrows():
i += 1
date = incident['Date'].date()
time = incident['Dispatch Time (HH:MM:SS)']
address = incident['Location']
if (not isinstance(address, str)):
errors.append(('no address', incident))
continue
address = normalize_address(address)
if (address == "BLANK"):
errors.append(('blank address', incident))
continue
addresses[address] += 1
quadrant = address_quadrant(address)
response_time = incident['Response Time (HH:MM:SS)']
unit = incident['Unit']
csv_rows.append([date, time, address, quadrant, response_time, unit])
csv_path = data_path + '/incidents.csv'
addresses_path = data_path + '/addresses.csv'
with open(csv_path, 'w') as csv_file:
writer = csv.writer(csv_file)
for row in csv_rows:
writer.writerow(row)
sorted_addresses = OrderedDict(sorted(addresses.items(), key=lambda t: t[0]))
with open(addresses_path, 'w') as addresses_file:
writer = csv.writer(addresses_file)
for address, count in sorted_addresses.items():
writer.writerow([address])
if (len(errors) > 0):
print_summary(len(csv_rows) - 1, errors)
def import_incident(row):
incident = []
for col in row:
incident.append(col.value)
return incident
def import_erda_file(file_name):
'''Imports an Excel file with the specified name.'''
print(file_name)
xl_file = pd.ExcelFile(file_name)
data_frame = xl_file.parse('DATA')
print(data_frame.shape[0], 'incident(s)')
return data_frame
def import_erda_files(data_path):
'''Imports all Excel files in the specified directory.'''
file_names = glob.glob(data_path + "/*.xlsx")
data_frames = []
for file_name in file_names:
data_frames.append(import_erda_file(file_name))
incidents = pd.concat(data_frames)
print(incidents.shape[0], 'incident(s) loaded.')
convert_to_csv(data_path, incidents)
def usage():
'''Prints a help string.'''
print("usage: buildcsv.py <dirname>")
if __name__ == "__main__":
if (len(sys.argv) == 2):
import_erda_files(sys.argv[1])
else:
usage()
|
bsd-3-clause
|
GuessWhoSamFoo/pandas
|
pandas/core/arrays/sparse.py
|
2
|
66808
|
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".format(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
isinstance(self.fill_value, type(other.fill_value)) or
isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from pandas.core.dtypes.missing import isna
return isna(self.fill_value)
@property
def _is_numeric(self):
from pandas.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.format(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()['subtype']
has_fill_value = m.groupdict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".format(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = getattr(dtype, 'dtype', dtype)
if (isinstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == 'Sparse'
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
fill_value = astype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific: we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if isinstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
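# Illustrative sketch added for exposition (not part of the original module);
# the helper name is hypothetical.
def _illustrate_sparse_dtype():
    # The default fill value tracks the subtype: 0 for ints, NaN for floats.
    assert SparseDtype(np.int64).fill_value == 0
    assert SparseDtype.construct_from_string('Sparse[int64]').name == 'Sparse[int64, 0]'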
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _get_fill(arr):
# type: (SparseArray) -> ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
The binary operation to perform
name : str
Name of the callable.
Returns
-------
SparseArray
"""
# type: (SparseArray, SparseArray, Callable, str) -> Any
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass copy=False. Need to fix astype_nansafe
left = left.astype(ltype)
right = right.astype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The fill value is potentially specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
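    Examples
    --------
    A minimal, illustrative construction; the expected output mirrors the
    ``astype`` examples further below and assumes the default integer kind.
    >>> SparseArray([0, 0, 1, 2], fill_value=0)
    [0, 0, 1, 2]
    Fill: 0
    IntIndex
    Indices: array([2, 3], dtype=int32)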
"""
__array_priority__ = 15
_pandas_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, copy=False):
from pandas.core.internals import SingleBlockManager
if isinstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if isinstance(data, (type(self), ABCSparseSeries)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
        # Handle user-provided dtype
if isinstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = pandas_dtype(dtype)
if isinstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if index is not None and not is_scalar(data):
raise Exception("must only pass scalars with an index ")
if is_scalar(data):
            if index is not None:
                if data is None:
                    data = np.nan
                npoints = len(index)
            elif sparse_index is None:
                npoints = 1
            else:
                npoints = sparse_index.length
dtype = infer_dtype_from_scalar(data)[0]
data = construct_1d_arraylike_from_scalar(
data, npoints, dtype
)
if dtype is not None:
dtype = pandas_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# XXX: What should the empty dtype be? Object or float?
data = np.array([], dtype=dtype)
if not is_array_like(data):
try:
# probably shared code in sanitize_series
from pandas.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = object
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if copy:
# TODO: avoid double copy when dtype forces cast.
data = data.copy()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if isinstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
sparse_values = np.asarray(data.sp_values, dtype=dtype)
elif sparse_index is None:
sparse_values, sparse_index, fill_value = make_sparse(
data, kind=kind, fill_value=fill_value, dtype=dtype
)
else:
sparse_values = np.asarray(data, dtype=dtype)
if len(sparse_values) != sparse_index.npoints:
raise AssertionError("Non array-like type {type} must "
"have the same length as the index"
.format(type=type(sparse_values)))
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(cls, sparse_array, sparse_index, dtype):
# type: (np.ndarray, SparseIndex, SparseDtype) -> 'SparseArray'
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
def __array__(self, dtype=None, copy=True):
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
return self.sp_values
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if is_datetime64_any_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with pandas NaT.
if fill_value is NaT:
# Can't put pd.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.to_int_index().indices] = self.sp_values
return out
def __setitem__(self, key, value):
# I suppose we could allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item assignment via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self):
"""
        The SparseIndex containing the location of non-``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self):
"""
        An ndarray containing the non-``fill_value`` values.
Examples
--------
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self):
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self):
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if isinstance(self.sp_index, IntIndex):
return 'integer'
else:
return 'block'
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __len__(self):
return self.sp_index.length
@property
def _null_fill_value(self):
return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return isna(fill_value)
else:
return self.fill_value == fill_value
@property
def nbytes(self):
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
        The fraction of non-``fill_value`` points, expressed as a decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
@property
def npoints(self):
"""
        The number of non-``fill_value`` points.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
@property
def values(self):
"""
Dense values
"""
return self.to_dense()
def isna(self):
from pandas import isna
        # If the fill value is null, we want SparseDtype[bool, True]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
return type(self)._simple_new(isna(self.sp_values),
self.sp_index, dtype)
def fillna(self, value=None, method=None, limit=None):
"""
Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
               as the whole array (including the `fill_value` gaps) is
               converted to a dense in-memory ndarray before filling.
limit : int, optional
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
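        Examples
        --------
        Illustrative sketch for NaN-backed float data; only the resulting
        fill value is shown to keep the example output minimal.
        >>> arr = SparseArray([1.0, np.nan])
        >>> arr.fillna(0.0).fill_value
        0.0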
"""
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillna with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
filled = interpolate_2d(np.asarray(self), method=method,
limit=limit)
return type(self)(filled, fill_value=self.fill_value)
else:
new_values = np.where(isna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentially just updating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
def shift(self, periods=1, fill_value=None):
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.astype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods):]
b = empty
return arr._concat_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first missing value.
Returns
-------
int
"""
if len(self) == 0 or self.sp_index.npoints == len(self):
return -1
indices = self.sp_index.to_int_index().indices
if not len(indices) or indices[0] > 0:
return 0
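        # Stored (non-fill) locations are consecutive up to the first gap, so
        # the first place where the stored indices jump by 2 or more marks
        # the first fill_value location.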
diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
def unique(self):
uniques = list(algos.unique(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
uniques.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_pandas_object
return np.asarray(self), self.fill_value
def factorize(self, na_sentinel=-1):
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
labels, uniques = algos.factorize(np.asarray(self),
na_sentinel=na_sentinel)
uniques = SparseArray(uniques, dtype=self.dtype)
return labels, uniques
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
from pandas import Index, Series
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and dropna:
pass
else:
if self._null_fill_value:
mask = isna(keys)
else:
mask = keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, ABCIndexClass):
keys = Index(keys)
result = Series(counts, index=keys)
return result
# --------
# Indexing
# --------
def __getitem__(self, key):
if isinstance(key, tuple):
if len(key) > 1:
raise IndexError("too many indices for array.")
key = key[0]
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
data_slice = self.values[key]
elif isinstance(key, slice):
# special case to preserve dtypes
if key == slice(None):
return self.copy()
# TODO: this logic is surely elsewhere
# TODO: this could be more efficient
indices = np.arange(len(self), dtype=np.int32)[key]
return self.take(indices)
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
# key's fill_value for True / False, and then do an intersection
            # on the indices of the sp_values.
if isinstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if com.is_bool_indexer(key) and len(self) == len(key):
return self.take(np.arange(len(key), dtype=np.int32)[key])
elif hasattr(key, '__len__'):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".format(key))
return type(self)(data_slice, kind=self.kind)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.get_value_at(self.sp_values, sp_loc)
def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError("'indices' must be an array, not a "
"scalar '{}'.".format(indices))
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = []
kwargs = {'dtype': self.dtype}
elif allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {'dtype': self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind,
**kwargs)
def _take_with_fill(self, indices, fill_value=None):
if fill_value is None:
fill_value = self.dtype.na_value
if indices.min() < -1:
raise ValueError("Invalid value in 'indices'. Must be between -1 "
"and the length of the array.")
if indices.max() >= len(self):
raise IndexError("out of bounds value in 'indices'.")
if len(self) == 0:
# Empty... Allow taking only if all empty
if (indices == -1).all():
dtype = np.result_type(self.sp_values, type(fill_value))
taken = np.empty_like(indices, dtype=dtype)
taken.fill(fill_value)
return taken
else:
raise IndexError('cannot do a non-empty take from an empty '
'axes.')
sp_indexer = self.sp_index.lookup_array(indices)
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
taken = np.full(sp_indexer.shape, fill_value=fill_value,
dtype=np.result_type(type(fill_value)))
else:
taken = self.sp_values.take(sp_indexer)
# sp_indexer may be -1 for two reasons
# 1.) we took for an index of -1 (new)
# 2.) we took a value that was self.fill_value (old)
new_fill_indices = indices == -1
old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
# Fill in two steps.
# Old fill values
# New fill values
# potentially coercing to a new dtype at each stage.
m0 = sp_indexer[old_fill_indices] < 0
m1 = sp_indexer[new_fill_indices] < 0
result_type = taken.dtype
if m0.any():
result_type = np.result_type(result_type,
type(self.fill_value))
taken = taken.astype(result_type)
taken[old_fill_indices] = self.fill_value
if m1.any():
result_type = np.result_type(result_type, type(fill_value))
taken = taken.astype(result_type)
taken[new_fill_indices] = fill_value
return taken
def _take_without_fill(self, indices):
to_shift = indices < 0
indices = indices.copy()
n = len(self)
if (indices.max() >= n) or (indices.min() < -n):
if n == 0:
raise IndexError("cannot do a non-empty take from an "
"empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
if to_shift.any():
indices[to_shift] += n
if self.sp_index.npoints == 0:
# edge case in take...
# I think just return
out = np.full(indices.shape, self.fill_value,
dtype=np.result_type(type(self.fill_value)))
arr, sp_index, fill_value = make_sparse(out,
fill_value=self.fill_value)
return type(self)(arr, sparse_index=sp_index,
fill_value=fill_value)
sp_indexer = self.sp_index.lookup_array(indices)
taken = self.sp_values.take(sp_indexer)
fillable = (sp_indexer < 0)
if fillable.any():
# TODO: may need to coerce array to fill value
result_type = np.result_type(taken, type(self.fill_value))
taken = taken.astype(result_type)
taken[fillable] = self.fill_value
return taken
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(
v, side, sorter
)
def copy(self, deep=False):
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return self._simple_new(values, self.sp_index, self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
fill_values = [x.fill_value for x in to_concat]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we also ignore the all-NA case.
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn("Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".format(fill_values),
PerformanceWarning,
stacklevel=6)
keep = to_concat[0]
to_concat2 = [keep]
for arr in to_concat[1:]:
to_concat2.append(cls(np.asarray(arr), fill_value=fill_value))
to_concat = to_concat2
values = []
length = 0
if to_concat:
sp_kind = to_concat[0].kind
else:
sp_kind = 'integer'
if sp_kind == 'integer':
indices = []
for arr in to_concat:
idx = arr.sp_index.to_int_index().indices.copy()
idx += length # TODO: wraparound
length += arr.sp_index.length
values.append(arr.sp_values)
indices.append(idx)
data = np.concatenate(values)
indices = np.concatenate(indices)
sp_index = IntIndex(length, indices)
else:
            # when concatenating block indices, we don't claim that you'll
            # get an identical index as concatenating the values and then
            # creating a new index. We don't want to spend the time trying
            # to merge blocks across arrays in `to_concat`, so the resulting
            # BlockIndex may have more blocks.
blengths = []
blocs = []
for arr in to_concat:
idx = arr.sp_index.to_block_index()
values.append(arr.sp_values)
blocs.append(idx.blocs.copy() + length)
blengths.append(idx.blengths)
length += arr.sp_index.length
data = np.concatenate(values)
blocs = np.concatenate(blocs)
blengths = np.concatenate(blengths)
sp_index = BlockIndex(length, blocs, blengths)
return cls(data, sparse_index=sp_index, fill_value=fill_value)
def astype(self, dtype=None, copy=True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
copy : bool, default True
Whether to ensure a copy is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.astype(np.dtype('int32'))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.astype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
[0, 0, 1.0, 2.0]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
        Use a SparseDtype if you wish to change the fill value as well.
>>> arr.astype(SparseDtype("float64", fill_value=np.nan))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
"""
dtype = self.dtype.update_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = astype_nansafe(self.sp_values,
subtype,
copy=copy)
if sp_values is self.sp_values and copy:
sp_values = sp_values.copy()
return self._simple_new(sp_values,
self.sp_index,
dtype)
def map(self, mapper):
"""
        Map values using an input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
        mapping to ``self.fill_value``.
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
        >>> arr.map(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.map({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in apply.
# We get hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if isinstance(mapper, ABCSeries):
mapper = mapper.to_dict()
if isinstance(mapper, compat.Mapping):
fill_value = mapper.get(self.fill_value, self.fill_value)
sp_values = [mapper.get(x, None) for x in self.sp_values]
else:
fill_value = mapper(self.fill_value)
sp_values = [mapper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
def to_dense(self):
"""
Convert SparseArray to a NumPy array.
Returns
-------
arr : NumPy array
"""
return np.asarray(self, dtype=self.sp_values.dtype)
# TODO: Look into deprecating this in favor of `to_dense`.
get_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
nd_state, (fill_value, sp_index) = state
sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
self._sparse_index = sp_index
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
else:
self.__dict__.update(state)
def nonzero(self):
if self.fill_value == 0:
return self.sp_index.to_int_index().indices,
else:
return self.sp_index.to_int_index().indices[self.sp_values != 0],
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(self, name, skipna=True, **kwargs):
method = getattr(self, name, None)
if method is None:
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
if skipna:
arr = self
else:
arr = self.dropna()
        # We don't support these kwargs.
        # They should only be present when called via pandas, so strip them
        # here instead of in `any` / `all` (which would raise if they were
        # present, thanks to nv.validate).
kwargs.pop('filter_type', None)
kwargs.pop('numeric_only', None)
kwargs.pop('op', None)
return getattr(arr, name)(**kwargs)
def all(self, axis=None, *args, **kwargs):
"""
        Tests whether all elements evaluate to True.
Returns
-------
all : bool
See Also
--------
numpy.all
"""
nv.validate_all(args, kwargs)
values = self.sp_values
if len(values) != len(self) and not np.all(self.fill_value):
return False
return values.all()
def any(self, axis=0, *args, **kwargs):
"""
        Tests whether at least one element evaluates to True.
Returns
-------
any : bool
See Also
--------
numpy.any
"""
nv.validate_any(args, kwargs)
values = self.sp_values
if len(values) != len(self) and np.any(self.fill_value):
return True
return values.any().item()
def sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
nv.validate_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
        When performing the cumulative summation, any NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative summation. If None,
perform cumulative summation over flattened array.
Returns
-------
cumsum : SparseArray
"""
nv.validate_cumsum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumsum()
return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
nv.validate_mean(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
def transpose(self, *axes):
"""
Returns the SparseArray.
"""
return self
@property
def T(self):
"""
Returns the SparseArray.
"""
return self
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
def __array_wrap__(self, array, context=None):
from pandas.core.dtypes.generic import ABCSparseSeries
ufunc, inputs, _ = context
inputs = tuple(x.values if isinstance(x, ABCSparseSeries) else x
for x in inputs)
return self.__array_ufunc__(ufunc, '__call__', *inputs)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordiv', 'truediv',
'divmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder'}
if compat.PY2:
special.add('div')
aliases = {
'subtract': 'sub',
'multiply': 'mul',
'floor_divide': 'floordiv',
'true_divide': 'truediv',
'power': 'pow',
'remainder': 'mod',
'divide': 'div',
'equal': 'eq',
'not_equal': 'ne',
'less': 'lt',
'less_equal': 'le',
'greater': 'gt',
'greater_equal': 'ge',
}
flipped = {
'lt': '__gt__',
'le': '__ge__',
'gt': '__lt__',
'ge': '__le__',
'eq': '__eq__',
'ne': '__ne__',
}
op_name = ufunc.__name__
op_name = aliases.get(op_name, op_name)
if op_name in special and kwargs.get('out') is None:
if isinstance(inputs[0], type(self)):
return getattr(self, '__{}__'.format(op_name))(inputs[1])
else:
name = flipped.get(op_name, '__r{}__'.format(op_name))
return getattr(self, name)(inputs[0])
if len(inputs) == 1:
# No alignment necessary.
sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
return self._simple_new(sp_values,
self.sp_index,
SparseDtype(sp_values.dtype, fill_value))
result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs],
**kwargs)
if out:
if len(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
return type(self)(result)
def __abs__(self):
return np.abs(self)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
@classmethod
def _create_unary_method(cls, op):
def sparse_unary_method(self):
fill_value = op(np.array(self.fill_value)).item()
values = op(self.sp_values)
dtype = SparseDtype(values.dtype, fill_value)
return cls._simple_new(values, self.sp_index, dtype)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(sparse_unary_method, name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
def sparse_arithmetic_method(self, other):
op_name = op.__name__
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to dispatch to us.
return NotImplemented
if isinstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
if op_name == 'divmod':
left, right = result
lfill, rfill = fill
return (_wrap_result(op_name, left, self.sp_index, lfill),
_wrap_result(op_name, right, self.sp_index, rfill))
return _wrap_result(op_name, result, self.sp_index, fill)
else:
other = np.asarray(other)
with np.errstate(all='ignore'):
# TODO: delete sparse stuff in core/ops.py
# TODO: look into _wrap_result
if len(self) != len(other):
raise AssertionError(
("length mismatch: {self} vs. {other}".format(
self=len(self), other=len(other))))
if not isinstance(other, SparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, op_name)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(sparse_arithmetic_method, name, cls)
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
op_name = op.__name__
if op_name in {'and_', 'or_'}:
op_name = op_name[:-1]
if isinstance(other, (ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
if not is_scalar(other) and not isinstance(other, type(self)):
# convert list-like to ndarray
other = np.asarray(other)
if isinstance(other, np.ndarray):
# TODO: make this more flexible than just ndarray...
if len(self) != len(other):
raise AssertionError("length mismatch: {self} vs. {other}"
.format(self=len(self),
other=len(other)))
other = SparseArray(other, fill_value=self.fill_value)
if isinstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
else:
with np.errstate(all='ignore'):
fill_value = op(self.fill_value, other)
result = op(self.sp_values, other)
return type(self)(result,
sparse_index=self.sp_index,
fill_value=fill_value,
dtype=np.bool_)
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(cmp_method, name, cls)
@classmethod
def _add_unary_ops(cls):
cls.__pos__ = cls._create_unary_method(operator.pos)
cls.__neg__ = cls._create_unary_method(operator.neg)
cls.__invert__ = cls._create_unary_method(operator.invert)
@classmethod
def _add_comparison_ops(cls):
cls.__and__ = cls._create_comparison_method(operator.and_)
cls.__or__ = cls._create_comparison_method(operator.or_)
super(SparseArray, cls)._add_comparison_ops()
# ----------
# Formatting
# -----------
def __unicode__(self):
return '{self}\nFill: {fill}\n{index}'.format(
self=printing.pprint_thing(self),
fill=printing.pprint_thing(self.fill_value),
index=printing.pprint_thing(self.sp_index))
def _formatter(self, boxed=False):
# Defer to the formatter from the GenericArrayFormatter calling us.
# This will infer the correct formatter from the dtype of the values.
return None
SparseArray._add_arithmetic_ops()
SparseArray._add_comparison_ops()
SparseArray._add_unary_ops()
def _maybe_to_dense(obj):
"""
try to convert to dense
"""
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
"""
    Convert a SparseSeries to a SparseArray (copying its values); a
    SparseArray passes through unchanged.
"""
if isinstance(array, ABCSparseSeries):
array = array.values.copy()
return array
def _sanitize_values(arr):
"""
return an ndarray for our input,
in a platform independent manner
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
# scalar
if is_scalar(arr):
arr = [arr]
# ndarray
if isinstance(arr, np.ndarray):
pass
elif is_list_like(arr) and len(arr) > 0:
arr = maybe_convert_platform(arr)
else:
arr = np.asarray(arr)
return arr
def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False):
"""
Convert ndarray to sparse format
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional
copy : bool, default False
Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
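    Examples
    --------
    Illustrative only; the repr of the returned SparseIndex is omitted here.
    >>> sparse_values, index, fill = make_sparse(
    ...     np.array([0, 0, 1, 2, 0]), kind='integer', fill_value=0)
    >>> sparse_values
    array([1, 2])
    >>> fill
    0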
"""
arr = _sanitize_values(arr)
if arr.ndim > 1:
raise TypeError("expected dimension <= 1 data")
if fill_value is None:
fill_value = na_value_for_dtype(arr.dtype)
if isna(fill_value):
mask = notna(arr)
else:
# For str arrays in NumPy 1.12.0, operator!= below isn't
# element-wise but just returns False if fill_value is not str,
# so cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.astype(object)
if is_object_dtype(arr.dtype):
            # NumPy's element-wise equality check does not distinguish
            # between element types, e.g. 0, 0.0, and False compare equal,
            # so we have to check both the type and the value.
mask = splib.make_mask_object_ndarray(arr, fill_value)
else:
mask = arr != fill_value
length = len(arr)
if length != len(mask):
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = mask.nonzero()[0].astype(np.int32)
index = _make_index(length, indices, kind)
sparsified_values = arr[mask]
if dtype is not None:
sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
# TODO: copy
return sparsified_values, index, fill_value
def _make_index(length, indices, kind):
if kind == 'block' or isinstance(kind, BlockIndex):
locs, lens = splib.get_blocks(indices)
index = BlockIndex(length, locs, lens)
elif kind == 'integer' or isinstance(kind, IntIndex):
index = IntIndex(length, indices)
else: # pragma: no cover
raise ValueError('must be block or integer type')
return index
# ----------------------------------------------------------------------------
# Accessor
@delegate_names(SparseArray, ['npoints', 'density', 'fill_value',
'sp_values'],
typ='property')
class SparseAccessor(PandasDelegate):
"""
    Accessor for sparse data, including conversion to and from scipy
    sparse matrices.
"""
def __init__(self, data=None):
self._validate(data)
# Store the Series since we need that for to_coo
self._parent = data
@staticmethod
def _validate(data):
if not isinstance(data.dtype, SparseDtype):
msg = "Can only use the '.sparse' accessor with Sparse data."
raise AttributeError(msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.values, name)
def _delegate_method(self, name, *args, **kwargs):
if name == 'from_coo':
return self.from_coo(*args, **kwargs)
elif name == 'to_coo':
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
        --------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = pd.SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series
from pandas import Series
result = _coo_to_sparse_series(A, dense_index=dense_index)
# SparseSeries -> Series[sparse]
result = Series(result.values, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.sparse.scipy_sparse import _sparse_series_to_coo
A, rows, columns = _sparse_series_to_coo(self._parent,
row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
|
bsd-3-clause
|
simpeg/simpeg
|
examples/02-mesh/plot_poisson.py
|
1
|
1736
|
"""
Mesh: Basic Forward 2D DC Resistivity
=====================================
2D DC forward modeling example with Tensor and Curvilinear Meshes
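(Both meshes solve the same finite-volume system A * phi = rhs where, up to
sign convention, A = -D * Msigma^-1 * D^T discretizes div(sigma grad(phi)) and
rhs places a +1/-1 dipole source at the two chosen points; see DCfun below.)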
"""
from SimPEG import Mesh, Utils, SolverLU
import numpy as np
import matplotlib.pyplot as plt
def run(plotIt=True):
# Step1: Generate Tensor and Curvilinear Mesh
sz = [40, 40]
tM = Mesh.TensorMesh(sz)
rM = Mesh.CurvilinearMesh(Utils.meshutils.exampleLrmGrid(sz, 'rotate'))
# Step2: Direct Current (DC) operator
def DCfun(mesh, pts):
D = mesh.faceDiv
sigma = 1e-2*np.ones(mesh.nC)
MsigI = mesh.getFaceInnerProduct(sigma, invProp=True, invMat=True)
A = -D*MsigI*D.T
A[-1, -1] /= mesh.vol[-1] # Remove null space
rhs = np.zeros(mesh.nC)
txind = Utils.meshutils.closestPoints(mesh, pts)
rhs[txind] = np.r_[1, -1]
return A, rhs
pts = np.vstack((np.r_[0.25, 0.5], np.r_[0.75, 0.5]))
# Step3: Solve DC problem (LU solver)
AtM, rhstM = DCfun(tM, pts)
AinvtM = SolverLU(AtM)
phitM = AinvtM*rhstM
ArM, rhsrM = DCfun(rM, pts)
AinvrM = SolverLU(ArM)
phirM = AinvrM*rhsrM
if not plotIt:
return
# Step4: Making Figure
fig, axes = plt.subplots(1, 2, figsize=(12*1.2, 4*1.2))
vmin, vmax = phitM.min(), phitM.max()
dat = tM.plotImage(phitM, ax=axes[0], clim=(vmin, vmax), grid=True)
dat = rM.plotImage(phirM, ax=axes[1], clim=(vmin, vmax), grid=True)
cb0 = plt.colorbar(dat[0], ax=axes[0])
cb1 = plt.colorbar(dat[0], ax=axes[1])
cb0.set_label("Voltage (V)")
cb1.set_label("Voltage (V)")
axes[0].set_title('TensorMesh')
axes[1].set_title('CurvilinearMesh')
if __name__ == '__main__':
run()
plt.show()
|
mit
|
TheArbiter/Networks
|
lab4/lab4exercise2/helper.py
|
8
|
3406
|
'''
Helper module for the plot scripts.
'''
import re
import itertools
import matplotlib as m
import os
if os.uname()[0] == "Darwin":
m.use("MacOSX")
else:
m.use("Agg")
import matplotlib.pyplot as plt
import argparse
import math
#import termcolor as T
def read_list(fname, delim=','):
lines = open(fname).xreadlines()
ret = []
for l in lines:
ls = l.strip().split(delim)
ls = map(lambda e: '0' if e.strip() == '' or e.strip() == 'ms' or e.strip() == 's' else e, ls)
ret.append(ls)
return ret
def ewma(alpha, values):
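    """Exponentially weighted moving average of `values`; `alpha` is the
    weight on the previous smoothed value (alpha == 0 returns the input).
    Illustrative, hand-computed example:
    >>> ewma(0.5, [1, 1, 1])
    [0.5, 0.75, 0.875]
    """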
if alpha == 0:
return values
ret = []
prev = 0
for v in values:
prev = alpha * prev + (1 - alpha) * v
ret.append(prev)
return ret
def col(n, obj = None, clean = lambda e: e):
"""A versatile column extractor.
col(n, [1,2,3]) => returns the nth value in the list
col(n, [ [...], [...], ... ] => returns the nth column in this matrix
col('blah', { ... }) => returns the blah-th value in the dict
col(n) => partial function, useful in maps
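    Illustrative doctest-style examples (Python 2 map semantics assumed):
    >>> col(1, [10, 20, 30])
    20
    >>> map(col(0), [[1, 2], [3, 4]])
    [1, 3]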
"""
    if obj is None:
def f(item):
return clean(item[n])
return f
if type(obj) == type([]):
if len(obj) > 0 and (type(obj[0]) == type([]) or type(obj[0]) == type({})):
return map(col(n, clean=clean), obj)
if type(obj) == type([]) or type(obj) == type({}):
try:
return clean(obj[n])
except:
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
# We wouldn't know what to do here, so just return None
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
def transpose(l):
return zip(*l)
def avg(lst):
return sum(map(float, lst)) / len(lst)
def stdev(lst):
mean = avg(lst)
var = avg(map(lambda e: (e - mean)**2, lst))
return math.sqrt(var)
def xaxis(values, limit):
l = len(values)
return zip(*map(lambda (x,y): (x*1.0*limit/l, y), enumerate(values)))
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def cdf(values):
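    """Empirical CDF of `values` (note: sorts the list in place).
    Illustrative example with four equally weighted points:
    >>> x, y = cdf([4.0, 1.0, 2.0, 2.0])
    >>> x
    [1.0, 2.0, 2.0, 4.0]
    >>> y[-1]
    1.0
    """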
values.sort()
prob = 0
l = len(values)
x, y = [], []
for v in values:
prob += 1.0 / l
x.append(v)
y.append(prob)
return (x, y)
def parse_cpu_usage(fname, nprocessors=8):
"""Returns (user,system,nice,iowait,hirq,sirq,steal) tuples
aggregated over all processors. DOES NOT RETURN IDLE times."""
data = grouper(nprocessors, open(fname).readlines())
"""Typical line looks like:
Cpu0 : 0.0%us, 1.0%sy, 0.0%ni, 97.0%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
"""
ret = []
for collection in data:
total = [0]*8
for cpu in collection:
usages = cpu.split(':')[1]
usages = map(lambda e: e.split('%')[0],
usages.split(','))
for i in xrange(len(usages)):
total[i] += float(usages[i])
total = map(lambda t: t/nprocessors, total)
# Skip idle time
ret.append(total[0:3] + total[4:])
return ret
def pc95(lst):
l = len(lst)
return sorted(lst)[ int(0.95 * l) ]
def pc99(lst):
l = len(lst)
return sorted(lst)[ int(0.99 * l) ]
def coeff_variation(lst):
return stdev(lst) / avg(lst)
|
gpl-3.0
|
fegonda/icon_demo
|
code/model/unet/unet.py
|
1
|
23919
|
import cPickle
import gzip
import os
import sys
import time
import numpy
import numpy as np
import multiprocessing
from keras.models import Model, Sequential, model_from_json
from keras.layers import Dense, Activation, Flatten, Input
from keras.layers import Convolution2D, MaxPooling2D, UpSampling2D, merge, ZeroPadding2D, Dropout, Lambda
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping
from keras import backend as K
from keras.optimizers import SGD
from keras.regularizers import l2
from generate_data import *
#import multiprocessing
import sys
import matplotlib
import matplotlib.pyplot as plt
# losing independence of backend for
# custom loss function
import theano
import theano.tensor as T
from evaluation import Rand_membrane_prob
from theano.tensor.shared_randomstreams import RandomStreams
base_path = os.path.dirname(__file__)
#sys.path.insert(1,os.path.join(base_path, '..'))
sys.path.insert(1,os.path.join(base_path, '../mlp'))
sys.path.insert(2,os.path.join(base_path, '../../common'))
sys.path.insert(3,os.path.join(base_path, '../../database'))
sys.path.insert(4,os.path.join(base_path, '../'))
from db import DB
from paths import Paths
from utility import Utility
from data import Data
srng = RandomStreams(1234)
class UNET(object):
srng = RandomStreams(1234)
def __init__(self, project):
self.done = False
self.project = project
self.offline = False
self.model = None
self.best_val_loss_so_far = 0
self.patience_counter = 0
self.patience = 100
self.patience_reset = 100
self.doBatchNormAll = False
self.doFineTune = False
self.patchSize = 572
self.patchSize_out = 388
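        # Note (derived from the valid-convolution arithmetic in the blocks
        # below): sizes on the way down are 572 -> 568 -> 284 -> 280 -> 140
        # -> 136 -> 68 -> 64 -> 32 -> 28, and on the way up 56 -> 52 -> 104
        # -> 100 -> 200 -> 196 -> 392 -> 388, hence patchSize_out = 388.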
self.n_samples_training = 30
self.n_samples_validation = 20
self.initialization = 'glorot_uniform'
def initialize(self):
pass
def train(self, offline=False, mean=None, std=None):
self.offline = offline
print 'Unet.train'
patchSize = self.patchSize
patchSize_out = self.patchSize_out
learning_rate = self.project.learningRate
momentum = self.project.momentum
# input data should be large patches as prediction is also over large patches
print
print "=== building network ==="
print "== BLOCK 1 =="
input = Input(shape=(1, patchSize, patchSize))
print "input ", input._keras_shape
block1_act, block1_pool = UNET.unet_block_down(input=input, nb_filter=64, doBatchNorm=self.doBatchNormAll)
print "block1 act ", block1_act._keras_shape
print "block1 ", block1_pool._keras_shape
#sys.stdout.flush()
print "== BLOCK 2 =="
block2_act, block2_pool = UNET.unet_block_down(input=block1_pool, nb_filter=128, doBatchNorm=self.doBatchNormAll)
print "block2 ", block2_pool._keras_shape
#sys.stdout.flush()
print "== BLOCK 3 =="
block3_act, block3_pool = UNET.unet_block_down(input=block2_pool, nb_filter=256, doBatchNorm=self.doBatchNormAll)
print "block3 ", block3_pool._keras_shape
#sys.stdout.flush()
print "== BLOCK 4 =="
block4_act, block4_pool = UNET.unet_block_down(input=block3_pool, nb_filter=512, doDropout=True, doBatchNorm=self.doBatchNormAll)
print "block4 ", block4_pool._keras_shape
#sys.stdout.flush()
print "== BLOCK 5 =="
print "no pooling"
block5_act, block5_pool = UNET.unet_block_down(input=block4_pool, nb_filter=1024, doDropout=True, doPooling=False, doBatchNorm=self.doBatchNormAll)
print "block5 ", block5_pool._keras_shape
#sys.stdout.flush()
print "=============="
print
print "== BLOCK 4 UP =="
block4_up = UNET.unet_block_up(input=block5_act, nb_filter=512, down_block_out=block4_act, doBatchNorm=self.doBatchNormAll)
print "block4 up", block4_up._keras_shape
print
#sys.stdout.flush()
print "== BLOCK 3 UP =="
block3_up = UNET.unet_block_up(input=block4_up, nb_filter=256, down_block_out=block3_act, doBatchNorm=self.doBatchNormAll)
print "block3 up", block3_up._keras_shape
print
#sys.stdout.flush()
print "== BLOCK 2 UP =="
block2_up = UNET.unet_block_up(input=block3_up, nb_filter=128, down_block_out=block2_act, doBatchNorm=self.doBatchNormAll)
print "block2 up", block2_up._keras_shape
#sys.stdout.flush()
print
print "== BLOCK 1 UP =="
block1_up = UNET.unet_block_up(input=block2_up, nb_filter=64, down_block_out=block1_act, doBatchNorm=self.doBatchNormAll)
print "block1 up", block1_up._keras_shape
sys.stdout.flush()
print "== 1x1 convolution =="
output = Convolution2D(nb_filter=1, nb_row=1, nb_col=1, subsample=(1,1),
init=self.initialization, activation='sigmoid', border_mode="valid")(block1_up)
print "output ", output._keras_shape
output_flat = Flatten()(output)
print "output flat ", output_flat._keras_shape
print 'Unet.train'
#self.load()
if not self.load():
self.model = Model(input=input, output=output_flat)
sgd = SGD(lr=learning_rate, decay=0, momentum=momentum, nesterov=False)
self.model.compile(loss=UNET.unet_crossentropy_loss_sampled, optimizer=sgd)
#self.model.compile(loss=UNET.unet_crossentropy_loss, optimizer=sgd)
#self.model.compile(loss="categorical_crossentropy", optimizer=sgd)
data = gen_data(self.project, 'validation', self.n_samples_validation, patchSize, patchSize_out)
data_x_val = data[0].astype(np.float32)
data_x_val = np.reshape(data_x_val, [-1, 1, patchSize, patchSize])
data_y_val = data[1].astype(np.float32)
print 'val x:', data_x_val.shape
print 'val y:', data_y_val.shape
#exit(1)
#data = gen_data(project, 'train', train_samples, patchSize, patchSize_out)
#data = generate_experiment_data_patch_prediction(purpose,train_samples,patchSize, patchSize_out)
#exit(1)
'''
data_x = data[0].astype(np.float32)
data_x = np.reshape(data_x, [-1, 1, patchSize, patchSize])
data_y = data[1].astype(np.float32)
print 'x:', data_x.shape
print 'y:', data_y.shape
data_val = gen_validation_data(project, train_samples, patchSize, patchSize_out)
data_x_val = data_val[0].astype(np.float32)
data_x_val = np.reshape(data_x_val, [-1, 1, patchSize, patchSize])
data_y_val = data_val[1].astype(np.float32)
data_label_val = data_val[2]
print 'val x:', data_x_val.shape
print 'val y:', data_y_val.shape
print 'val labels:', data_label_val.shape
'''
# start pool for data
print "Starting worker."
pool = multiprocessing.Pool(processes=1)
purpose = 'train'
futureData = pool.apply_async(stupid_map_wrapper, [[gen_data,self.project,purpose, self.n_samples_training, patchSize, patchSize_out]])
best_val_loss_so_far = 0
patience_counter = 0
for epoch in xrange(10000000):
if self.done:
print 'stopping training...'
break
print "Waiting for data."
data = futureData.get()
#data = gen_data(self.project, 'train', self.n_samples_training, patchSize, patchSize_out)
data_x = data[0].astype(np.float32)
data_x = np.reshape(data_x, [-1, 1, patchSize, patchSize])
data_y = data[1].astype(np.float32)
print "got new data"
print 'x:', data_x.shape
print 'y:', data_y.shape
futureData = pool.apply_async(stupid_map_wrapper, [[gen_data,self.project,purpose, self.n_samples_training, patchSize, patchSize_out]])
#print "current learning rate: ", self.model.optimizer.lr.get_value()
self.model.fit(data_x, data_y, batch_size=1, nb_epoch=1)
im_pred = 1-self.model.predict(x=data_x_val, batch_size = 1)
#print im_pred.shape
#print np.unique( im_pred )
self.save()
if True:
continue
mean_val_rand = 0.0
val_samples = data_x_val.shape[0]
for val_ind in xrange(val_samples):
im_pred_single = np.reshape(im_pred[val_ind,:], (patchSize_out,patchSize_out))
im_gt = np.reshape(data_label_val[val_ind], (patchSize_out,patchSize_out))
validation_rand = Rand_membrane_prob(im_pred_single, im_gt)
mean_val_rand += validation_rand
#print 'val:', val_ind, 'rand:', validation_rand, 'mrand:', mean_val_rand
mean_val_rand /= np.double(val_samples)
#print "validation RAND ", mean_val_rand
print mean_val_rand, " > ", self.best_val_loss_so_far
print mean_val_rand - self.best_val_loss_so_far
if mean_val_rand > self.best_val_loss_so_far:
self.best_val_loss_so_far = mean_val_rand
print "NEW BEST MODEL"
self.save_best()
self.patience_counter=0
else:
self.patience_counter +=1
# no progress anymore, need to decrease learning rate
if self.patience_counter == self.patience:
print "DECREASING LEARNING RATE"
print "before: ", learning_rate
learning_rate *= 0.1
print "now: ", learning_rate
self.model.optimizer.lr.set_value(learning_rate)
self.patience = self.patience_reset
self.patience_counter = 0
# reload best state seen so far
self.model = self.load()
def predict(self, image, mean=None, std=None, threshold=0.5):
print 'UNET.predict'
patchSize = self.patchSize
patchSize_out = self.patchSize_out
start_time = time.clock()
j_path, w_path, rev = self.get_paths( forSaving=False, forBest=True)
if not os.path.exists( j_path ):
j_path, w_path, rev = self.get_paths( forSaving=False, forBest=False)
model = model_from_json(open( j_path ).read())
model.load_weights( w_path )
sgd = SGD(lr=0.01, decay=0, momentum=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
image = image - 0.5
probImage = np.zeros(image.shape)
# count compilation time to init
row = 0
col = 0
patch = image[row:row+patchSize,col:col+patchSize]
data = np.reshape(patch, (1,1,patchSize,patchSize))
probs = model.predict(x=data, batch_size=1)
init_time = time.clock()
#print "Initialization took: ", init_time - start_time
image_orig = image.copy()
for rotation in range(1):
image = np.rot90(image_orig, rotation)
# pad the image
padding_ul = int(np.ceil((patchSize - patchSize_out)/2.0))
# need large padding for lower right corner
paddedImage = np.pad(image, patchSize, mode='reflect')
needed_ul_padding = patchSize - padding_ul
paddedImage = paddedImage[needed_ul_padding:, needed_ul_padding:]
probImage_tmp = np.zeros(image.shape)
for row in xrange(0,image.shape[0],patchSize_out):
for col in xrange(0,image.shape[1],patchSize_out):
patch = paddedImage[row:row+patchSize,col:col+patchSize]
data = np.reshape(patch, (1,1,patchSize,patchSize))
probs = 1-model.predict(x=data, batch_size = 1)
probs = np.reshape(probs, (patchSize_out,patchSize_out))
row_end = patchSize_out
if row+patchSize_out > probImage.shape[0]:
row_end = probImage.shape[0]-row
col_end = patchSize_out
if col+patchSize_out > probImage.shape[1]:
col_end = probImage.shape[1]-col
probImage_tmp[row:row+row_end,col:col+col_end] = probs[:row_end,:col_end]
probImage += np.rot90(probImage_tmp, 4-rotation)
probImage = probImage / 1.0
prob = self.threshold( probImage, factor=threshold )
prob = prob.astype(dtype=int)
prob = prob.flatten()
end_time = time.clock()
print "Prediction took: ", end_time - init_time
print "Speed: ", 1./(end_time - init_time)
print "Time total: ", end_time-start_time
print 'results :', np.bincount( prob )
print prob.shape
print prob
return prob
def threshold(self, prob, factor=0.5):
prob[ prob >= factor ] = 9
prob[ prob < factor ] = 1
prob[ prob == 9 ] = 0
return prob
    # need to define a custom loss, because all pre-implementations
    # seem to assume that scores over a patch add up to one, which
    # they clearly don't and shouldn't
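    # For reference, the plain (unsampled) loss below is per-pixel binary
    # cross-entropy averaged over the patch:
    #     L = -mean_i [ w * y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ]
    # with w = weight_class_1 and p_i clipped to [epsilon, 1 - epsilon].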
@staticmethod
def unet_crossentropy_loss(y_true, y_pred):
weight_class_1 = 1.
epsilon = 1.0e-4
y_pred_clipped = T.clip(y_pred, epsilon, 1.0-epsilon)
loss_vector = -T.mean(weight_class_1*y_true * T.log(y_pred_clipped) + (1-y_true) * T.log(1-y_pred_clipped), axis=1)
average_loss = T.mean(loss_vector)
return average_loss
@staticmethod
def aaunet_crossentropy_loss_sampled(y_true, y_pred):
epsilon = 1.0e-4
y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
y_true = T.flatten(y_true)
# this seems to work
# it is super ugly though and I am sure there is a better way to do it
        # but I am struggling to get Theano to cooperate
# filter the right indices
classPos = 1
classNeg = 0
indPos = T.eq(y_true, classPos).nonzero()[0]
indNeg = T.eq(y_true, classNeg).nonzero()[0]
pos = y_true[ indPos ]
neg = y_true[ indNeg ]
# shuffle
n = indPos.shape[0]
indPos = indPos[UNET.srng.permutation(n=n)]
n = indNeg.shape[0]
indNeg = indNeg[UNET.srng.permutation(n=n)]
# take equal number of samples depending on which class has less
# n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')
#n_samples = T.cast(T.min([T.sum(pos), T.sum(neg)]), dtype='int64')
n_samples = T.cast(T.min([ indPos.shape[0], indNeg.shape[0]]), dtype='int64')
#n_samples = T.cast(T.max(n_samples, 1), dtype='int64')
indPos = indPos[:n_samples]
indNeg = indNeg[:n_samples]
loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(y_pred_clipped[indNeg])).eval()
#loss_vector = T.clip(loss_vector, epsilon, 1.0-epsilon)
#loss_vector.set_value( np.array([0.99]) )
average_loss = T.mean(loss_vector)
return average_loss
@staticmethod
def unet_crossentropy_lossipp(y_true, y_pred):
classPos = 1
classNeg = 0
        weight_class_1 = 1.  # was commented out, but it is used below
epsilon = 1.0e-4
y_pred_clipped = T.clip(y_pred, epsilon, 1.0-epsilon)
loss_vector = -T.mean(weight_class_1*y_true * T.log(y_pred_clipped) + (1-y_true) * T.log(1-y_pred_clipped), axis=1)
average_loss = T.mean(loss_vector)
return average_loss
@staticmethod
def unet_crossentropy_loss_sampled(y_true, y_pred):
epsilon = 1.0e-4
y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
y_true = T.flatten(y_true)
# this seems to work
# it is super ugly though and I am sure there is a better way to do it
        # but I am struggling to get Theano to cooperate
# filter the right indices
classPos = 1
classNeg = 0
indPos = T.eq(y_true, classPos).nonzero()[0]
indNeg = T.eq(y_true, classNeg).nonzero()[0]
#pos = y_true[ indPos ]
#neg = y_true[ indNeg ]
# shuffle
n = indPos.shape[0]
indPos = indPos[UNET.srng.permutation(n=n)]
n = indNeg.shape[0]
indNeg = indNeg[UNET.srng.permutation(n=n)]
# take equal number of samples depending on which class has less
n_samples = T.cast(T.min([ indPos.shape[0], indNeg.shape[0]]), dtype='int64')
#n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')
indPos = indPos[:n_samples]
indNeg = indNeg[:n_samples]
#loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
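        # NOTE: unlike the commented-out variant above, the active line below
        # applies log(p) to the sampled negatives as well (not log(1 - p));
        # the clip afterwards bounds the scalar loss to [epsilon, 1 - epsilon].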
loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(y_pred_clipped[indNeg]))
loss_vector = T.clip(loss_vector, epsilon, 1.0-epsilon)
average_loss = T.mean(loss_vector)
if T.isnan(average_loss):
average_loss = T.mean( y_pred_clipped[indPos])
return average_loss
@staticmethod
def unet_block_down(input, nb_filter, doPooling=True, doDropout=False, doBatchNorm=False, initialization = 'glorot_uniform', weight_decay = 0.):
# first convolutional block consisting of 2 conv layers plus activation, then maxpool.
# All are valid area, not same
act1 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(input)
if doBatchNorm:
act1 = BatchNormalization(mode=0, axis=1)(act1)
act2 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(act1)
if doBatchNorm:
act2 = BatchNormalization(mode=0, axis=1)(act2)
if doDropout:
act2 = Dropout(0.5)(act2)
if doPooling:
            # now downsampling with maxpool
pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode="valid")(act2)
else:
pool1 = act2
return (act2, pool1)
# need to define lambda layer to implement cropping
# input is a tensor of size (batchsize, channels, width, height)
@staticmethod
def crop_layer( x, cs):
cropSize = cs
return x[:,:,cropSize:-cropSize, cropSize:-cropSize]
@staticmethod
def unet_block_up(input, nb_filter, down_block_out, doBatchNorm=False, initialization = 'glorot_uniform', weight_decay = 0.):
print "This is unet_block_up"
print "input ", input._keras_shape
# upsampling
up_sampled = UpSampling2D(size=(2,2))(input)
print "upsampled ", up_sampled._keras_shape
# up-convolution
conv_up = Convolution2D(nb_filter=nb_filter, nb_row=2, nb_col=2, subsample=(1,1),
init=initialization, activation='relu', border_mode="same", W_regularizer=l2(weight_decay))(up_sampled)
print "up-convolution ", conv_up._keras_shape
# concatenation with cropped high res output
# this is too large and needs to be cropped
print "to be merged with ", down_block_out._keras_shape
#padding_1 = int((down_block_out._keras_shape[2] - conv_up._keras_shape[2])/2)
#padding_2 = int((down_block_out._keras_shape[3] - conv_up._keras_shape[3])/2)
#print "padding: ", (padding_1, padding_2)
#conv_up_padded = ZeroPadding2D(padding=(padding_1, padding_2))(conv_up)
#merged = merge([conv_up_padded, down_block_out], mode='concat', concat_axis=1)
cropSize = int((down_block_out._keras_shape[2] - conv_up._keras_shape[2])/2)
down_block_out_cropped = Lambda(UNET.crop_layer, output_shape=conv_up._keras_shape[1:], arguments={"cs":cropSize})(down_block_out)
print "cropped layer size: ", down_block_out_cropped._keras_shape
merged = merge([conv_up, down_block_out_cropped], mode='concat', concat_axis=1)
print "merged ", merged._keras_shape
# two 3x3 convolutions with ReLU
# first one halves the feature channels
act1 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(merged)
if doBatchNorm:
act1 = BatchNormalization(mode=0, axis=1)(act1)
print "conv1 ", act1._keras_shape
act2 = Convolution2D(nb_filter=nb_filter, nb_row=3, nb_col=3, subsample=(1,1),
init=initialization, activation='relu', border_mode="valid", W_regularizer=l2(weight_decay))(act1)
if doBatchNorm:
act2 = BatchNormalization(mode=0, axis=1)(act2)
print "conv2 ", act2._keras_shape
return act2
def save(self, best=False):
print 'Model.save()'
        if self.model is None:
return False
j_path, w_path, rev = self.get_paths(forSaving=True, forBest=best)
print 'saving model...'
json_string = self.model.to_json()
open(j_path, 'w').write(json_string)
self.model.save_weights(w_path, overwrite=True)
if not self.offline:
DB.finishSaveModel( self.project.id, rev )
return True
def load(self, best=False):
print 'Model.load()'
j_path, w_path, rev = self.get_paths(forSaving=False, forBest=best)
if not os.path.exists( j_path ) or not os.path.exists( w_path ):
return False
print 'loading model...'
self.model = model_from_json(open( j_path ).read())
self.model.load_weights( w_path )
return True
def threshold(self, prob, factor=0.5):
prob[ prob >= factor ] = 9
prob[ prob < factor ] = 1
prob[ prob == 9 ] = 0
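        # (descriptive note, not in the original) the temporary value 9 marks entries
        # >= factor so they end up as 0 while entries < factor become 1, i.e. the
        # returned mask is inverted: high probabilities -> 0, low probabilities -> 1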
return prob
def report_stats(self, elapsedTime, batchIndex, valLoss, trainCost):
if not self.offline:
DB.storeTrainingStats( self.project.id, valLoss, trainCost, mode=0)
msg = '(%0.1f) %i %f%%'%\
(
elapsedTime,
batchIndex,
valLoss
)
status = '[%f]'%(trainCost)
Utility.report_status( msg, status )
@staticmethod
def shared_dataset(data_xy, borrow=True, doCastLabels=True):
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
if not doCastLabels:
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
else:
shared_y = theano.shared(np.asarray(data_y,
dtype=np.int32),
borrow=borrow)
return shared_x, shared_y
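    # Illustrative use (x_np/y_np are assumed numpy arrays of patches and labels):
    #   train_x, train_y = UNET.shared_dataset((x_np, y_np), doCastLabels=True)
    # doCastLabels=True stores the labels as int32 so they can serve as class
    # indices on the Theano side.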
def get_paths(self, forSaving=False, forBest=False):
name = '%s_%s'%(self.project.id, self.project.type)
prefix = 'best' if forBest else 'latest'
posfix = ''
revision = 0
if not self.offline:
revision = DB.getRevision( self.project.id )
revision = (revision+1)%10
posfix = '_%d'%(revision) if forBest else ''
else:
name = '%s_offline'%(name)
# construct the path to the network and weights
path = '%s/%s_%s%s'%(Paths.Models, prefix, name, posfix)
j_path = '%s.json'%(path)
w_path = '%s_weights.h5'%(path)
return j_path.lower(), w_path.lower(), revision
|
mit
|
joshloyal/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
42
|
7253
|
"""This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
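    # (descriptive note) the generated pairsDevTrain.txt mirrors the real LFW
    # metadata layout: a count header line, then 5 same-person lines of the form
    # "name\ti\tj" and 5 different-person lines "name1\ti\tname2\tj"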
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3,
download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100,
download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
carltoews/tennis
|
data/tennis_atp-master/examples/examples.py
|
1
|
117401
|
'''
File name: examples.py
Description: all sorts of tennis-stats examples based on Jeff's tennis data (see: https://github.com/JeffSackmann/tennis_atp)
    Comment: the approach taken for the calculations may not always be the most pythonic; please forgive :)
Author: Beta2k
Python Version: 3
'''
import csv
import pprint
import datetime
import glob
import sys
import operator
import itertools
import collections
from operator import itemgetter
from collections import OrderedDict
import json
import numpy as np
import pandas as pd
import math
from pandas.core.categorical import Categorical
from spyderlib.widgets.externalshell import namespacebrowser
from dateutil import relativedelta # needed by yearmonthdiff below
#util functions
def parse(t):
ret = []
for ts in t:
try:
string = str(ts)
tsdt = datetime.date(int(string[:4]), int(string[4:6]), int(string[6:]))
except TypeError:
tsdt = datetime.date(1900,1,1)
ret.append(tsdt)
return ret
def readATPMatches(dirname):
"""Reads ATP matches but does not parse time into datetime object"""
allFiles = glob.glob(dirname + "/atp_matches_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0)
container.append(df)
matches = pd.concat(container)
return matches
def readATPMatchesParseTime(dirname):
"""Reads ATP matches and parses time into datetime object"""
allFiles = glob.glob(dirname + "/atp_matches_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0,
parse_dates=[5],
encoding = "ISO-8859-1",
date_parser=lambda t:parse(t))
container.append(df)
matches = pd.concat(container)
return matches
def readFMatches(dirname):
"""Reads ITF future matches but does not parse time into datetime object"""
allFiles = glob.glob(dirname + "/atp_matches_futures_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0)
container.append(df)
matches = pd.concat(container)
return matches
def readFMatchesParseTime(dirname):
"""Reads ITF future matches but does not parse time into datetime object"""
allFiles = glob.glob(dirname + "/atp_matches_futures_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0,
parse_dates=[5],
encoding = "ISO-8859-1",
date_parser=lambda t:parse(t))
container.append(df)
matches = pd.concat(container)
return matches
def readChall_QATPMatchesParseTime(dirname):
"""reads Challenger level + ATP Q matches and parses time into datetime objects"""
allFiles = glob.glob(dirname + "/atp_matches_qual_chall_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0,
parse_dates=[5],
encoding = "ISO-8859-1",
date_parser=lambda t:parse(t))
container.append(df)
matches = pd.concat(container)
return matches
def readChall_QATPMatches(dirname):
"""reads Challenger level + ATP Q matches but does not parse time into datetime objects"""
allFiles = glob.glob(dirname + "/atp_matches_qual_chall_" + "????.csv")
matches = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=0)
container.append(df)
matches = pd.concat(container)
return matches
def readAllRankings(dirname):
"""reads all ranking files"""
allFiles = glob.glob(dirname + "/atp_rankings_" + "*.csv")
#allFiles = ['..\\atp_rankings_00s.csv', '..\\atp_rankings_10s.csv']
ranks = pd.DataFrame()
container = list()
for filen in allFiles:
df = pd.read_csv(filen,
index_col=None,
header=None,
parse_dates=[0],
encoding = "ISO-8859-1",
date_parser=lambda t:parse(t))
container.append(df)
ranks = pd.concat(container)
return ranks
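#illustrative convenience loader (not part of the original script): it simply
#chains the readers above and assumes, like them, that the CSV files live one
#directory up
def loadAllData(dirname=".."):
    """returns (atpmatches, qmatches, fmatches, rankings) using the readers above"""
    atpmatches = readATPMatches(dirname)
    qmatches = readChall_QATPMatches(dirname)
    fmatches = readFMatches(dirname)
    rankings = readAllRankings(dirname)
    return atpmatches, qmatches, fmatches, rankings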
def parse_date(td):
"""helper function to parse time"""
    resYear = float(td.days)/364.0 # get the number of years, including the fractional part after the dot
    resMonth = int((resYear - int(resYear))*364/30) # get the number of months by multiplying the fractional part by 364 and dividing by 30
resYear = int(resYear)
return str(resYear) + "y" + str(resMonth) + "m"
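#for example (illustrative): parse_date(datetime.timedelta(days=5875)) returns "16y1m";
#5875 days is also the age cutoff used in rankingPointsOfYoungsters further below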
def yearmonthdiff(row):
s = row['ranking_date']
e = row['dob']
return relativedelta.relativedelta(s,e)
def get_date_wins(in_group):
temp = atpmatches[(atpmatches['winner_name'] == in_group.name) & (atpmatches['round'] == 'F')]
in_group["tournament_wins"] = in_group.apply(lambda x: len(temp[temp['tourney_date'] < x['ranking_date']]), axis=1)
return in_group
def getRankForPreviousMonday(tdate,playername):
"""utility function to calculate the rank of a player from the previous week"""
global joinedrankingsdf
print(tdate)
print(playername)
#some tournaments start on a sunday, so we change this to a monday in order to get the correct ranking later on (we only have rankings for mondays obviously)
if (tdate.weekday() != 0):
diff = 7 - tdate.weekday()
tdate = tdate + datetime.timedelta(days = diff)
for x in range(1, 3):
prevmon = tdate - datetime.timedelta(days = 7*x)
if (len(joinedrankingsdf[(joinedrankingsdf['date'] == prevmon)]) > 0):
rank = joinedrankingsdf[((joinedrankingsdf['fullname'] == playername) & (joinedrankingsdf['date'] == prevmon))].iloc[[0]]['rank'].values[0]
return rank
break
else:
continue
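#minimal sketch (illustrative, not used by the original code) isolating the
#monday-alignment step from getRankForPreviousMonday above
def nextMonday(tdate):
    """returns tdate itself if it already is a monday, otherwise the following monday"""
    if (tdate.weekday() != 0):
        return tdate + datetime.timedelta(days = 7 - tdate.weekday())
    return tdate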
#calculations
def matchesPerCountryAndRound(matches):
"""find single matches based on country and round"""
matches = matches[(matches['round']=='F') & (matches['winner_ioc'] == 'AUT') & (matches['loser_ioc'] == 'AUT')]
matches = matches.sort(['tourney_date'], ascending=False)
#print matches.to_string(columns=['tourney_name','tourney_date','winner_name', 'loser_name'])
print(matches[['tourney_name','tourney_date','winner_name', 'loser_name']].to_csv(sys.stdout,index=False))
def bestLLinGrandSlams(matches):
"""looking for LLs who got deepes int grand slam draws starting from R32"""
matches = matches[((matches['round']=='R32') | (matches['round']=='R16') | (matches['round']=='QF') | (matches['round']=='SF') | (matches['round']=='F')) & (matches['tourney_level'] == 'G') & (matches['loser_entry'] == 'LL')]
matches = matches.sort(['tourney_date'], ascending=False)
print(matches[['tourney_name','tourney_date','round','winner_name','winner_entry', 'loser_name', 'loser_entry']].to_csv(sys.stdout,index=False))
def numberOfSetsLongerThan(matches,sets,minutes):
"""find matches longer than 'minutes' with 'sets' number of played sets"""
matches['score'].astype('str')
matches = matches[(matches['minutes'] > minutes) & (matches['score'].str.count('-') == sets)]
matches = matches.sort(['minutes'], ascending=False)
print(matches[['minutes','score','tourney_name','tourney_date','round','winner_name', 'loser_name']].to_csv(sys.stdout,index=False))
def geth2hforplayer(matches,name):
"""get all head-to-heads of the player"""
matches = matches[(matches['winner_name'] == name) | (matches['loser_name'] == name)]
h2hs = {}
for index, match in matches.iterrows():
if (match['winner_name'] == name):
if (match['loser_name'] not in h2hs):
h2hs[match['loser_name']] = {}
h2hs[match['loser_name']]['l'] = 0
h2hs[match['loser_name']]['w'] = 1
else:
h2hs[match['loser_name']]['w'] = h2hs[match['loser_name']]['w']+1
elif (match['loser_name'] == name):
if (match['winner_name'] not in h2hs):
h2hs[match['winner_name']] = {}
h2hs[match['winner_name']]['w'] = 0
h2hs[match['winner_name']]['l'] = 1
else:
h2hs[match['winner_name']]['l'] = h2hs[match['winner_name']]['l']+1
#create list
h2hlist = []
for k, v in h2hs.items():
h2hlist.append([k, v['w'],v['l']])
#sort by wins and then by losses + print
#filter by h2hs with more than 6 wins:
#h2hlist = [i for i in h2hlist if i[1] > 6]
if (len(h2hlist) == 0):
return ''
else:
return sorted(h2hlist, key=itemgetter(1,2))
#for h2h in h2hlist:
# print(name+';'+h2h[0]+';'+str(h2h[1])+';'+str(h2h[2]))
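#small illustrative wrapper (not in the original script): prints the head-to-head
#list returned by geth2hforplayer as csv lines, like the commented-out print above
def printh2hforplayer(matches, name):
    for h2h in geth2hforplayer(matches, name):
        print(name+';'+h2h[0]+';'+str(h2h[1])+';'+str(h2h[2]))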
def getWinLossByPlayer(qmatches, activeplayers, active):
"""returns w/l by player - change tourney level here (S=futures, C=challenger, etc.). of course matches of
the queries tourney level need to be in the matches provided as an argument to this function"""
retmatches = qmatches[qmatches['score'].str.contains('RET').fillna(False)]
retmatches_group = retmatches.groupby('loser_name').size().order(ascending=False)
matches = qmatches[(qmatches['tourney_level'] == 'S')]
finals = qmatches[(qmatches['round'] == 'F')]
semifinals = qmatches[(qmatches['round'] == 'SF')]
titles_group = finals.groupby('winner_name').size()
finals_group = semifinals.groupby('winner_name').size()
w_group = matches.groupby('winner_name').size()
l_group = matches.groupby('loser_name').size()
scores = pd.DataFrame({'wins' : w_group, 'losses' : l_group}).fillna(0)
scores[['wins', 'losses']] = scores[['wins', 'losses']].astype(int)
scores = scores.reindex_axis(['wins','losses'], axis=1)
if (active):
MAX_RANK = 20
activeplayerslist = [row[0] for row in activeplayers]
scores = scores[(scores.index.isin(activeplayerslist[:MAX_RANK]))]
scores.index = pd.CategoricalIndex(scores.index, categories=activeplayerslist, ordered=True)
#toggle sorting by ranking in next line
scores = scores.sort_index()
#todo: add titles and finals
scores["matches"] = scores["wins"] + scores["losses"]
scores["percentage"] = np.round(scores["wins"]*100/scores["matches"],2)
#to see a column name when printig results
scores.index.name = 'pname'
scores = scores.join(pd.DataFrame(finals_group, columns = ['finals'],)).fillna(0)
scores = scores.join(pd.DataFrame(titles_group, columns = ['titles'],)).fillna(0)
scores = scores.join(pd.DataFrame(retmatches_group, columns = ['rets'],)).fillna(0)
#changing datatype to int because it automatically is changed to float because of NaN values added for players which have 0 titles.
#even though the NaNs are immediately replaced by 0 values, the dtype of the column is changed from int to float.
#this is the reason why i need to change it back to int in the next line
scores['titles'] = scores['titles'].astype('int')
scores['finals'] = scores['finals'].astype('int')
scores['rets'] = scores['rets'].astype('int')
#sort by wins
scores = scores.sort(['titles'], ascending=False)
print(scores.to_csv(sys.stdout,index=True))
def getRets(matches):
matches = matches[matches['score'].str.contains('RET').fillna(False)]
l_group = matches.groupby('loser_name').size().order(ascending=False)
print(l_group.to_csv(sys.stdout,index=True))
def findLLQmultipleMatchesAtSameTournament(atpmatches,qmatches):
"""find if LL had to play same player in Q3/Q2 and MD of same tournament"""
resultlist = list()
tourney_group = atpmatches.groupby('tourney_id')
for tname, tdf in tourney_group:
found1=False
found2=False
#first_case finds where a LL won against a Q in a main draw (MD)
first_case = tdf[(tdf['winner_entry'] == 'LL') & (tdf['loser_entry'] == 'Q')]
#iterating over first_case matches
for index, match in first_case.iterrows():
#looking for Q-finals where the loser matches the name of a winner of first_case matches
first_case_results = qmatches[(qmatches['tourney_name'] == match['tourney_name']+ ' Q') & ((qmatches['round'] =='Q2') | (qmatches['round'] =='Q3')) & (match['winner_name'] == qmatches['loser_name']) & (match['loser_name'] == qmatches['winner_name'])]
if (len(first_case_results.index) > 0):
#if results were found, add the MD match to the result list
resultlist.append(first_case[((first_case['winner_name'] == first_case_results['loser_name']))])
#second_case finds where a LL lost against a Q in a main draw (MD)
second_case = tdf[(tdf['winner_entry'] == 'Q') & (tdf['loser_entry'] == 'LL')]
for index, match in second_case.iterrows():
#looking for Q-finals where the loser matches the name of a loser of second_case matches
second_case_results = qmatches[(qmatches['tourney_name'] == match['tourney_name']+ ' Q') & ((qmatches['round'] =='Q2') | (qmatches['round'] =='Q3')) & (match['winner_name'] == qmatches['winner_name']) & (match['loser_name'] == qmatches['loser_name'])]
if (len(second_case_results.index) > 0):
#if results were found, add the MD match to the result list
resultlist.append(second_case[(second_case['loser_name'] == second_case_results['loser_name'])])
result = pd.concat(resultlist).sort(['tourney_date'], ascending=False)
print(result[['tourney_name','tourney_date','round','winner_name','winner_entry', 'loser_name','loser_entry']].to_csv(sys.stdout,index=False))
def getActivePlayers(dirname):
"""finds active players, i.e. players who are in the current ranking"""
currentRanking = dirname + "/atp_rankings_current.csv"
playersDB = dirname + "/atp_players.csv"
rankingdf = pd.DataFrame()
playersdf = pd.DataFrame()
rankingdf = pd.read_csv(currentRanking,index_col=None,header=None)
rankingdf.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None, encoding = "ISO-8859-1")
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
maxdate = rankingdf['date'].max()
rankingdf = rankingdf[(rankingdf['date'] == maxdate)]
join = pd.merge(rankingdf,playersdf, on='id')
join["fullname"] = join["fname"] + ' ' + join["lname"]
join = join[['fullname','rank']]
namelist = join.values.tolist()
return namelist
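#illustrative usage (not part of the original script): combine getActivePlayers
#with getWinLossByPlayer for the currently ranked players; assumes the futures
#match files and ranking/player CSVs are available one directory up
def printActiveFuturesWinLoss(dirname=".."):
    fmatches = readFMatches(dirname)
    activeplayers = getActivePlayers(dirname)
    getWinLossByPlayer(fmatches, activeplayers, True)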
def seedRanking(matches):
"""finds rankings of seeds"""
#todo: create new DF to merge when seed won/lost into one column.
#also take into account that in old draws the seeds did not have byes in R32. this also needs to be filtered.
wmatches = matches[((matches['round'] == 'R16') & (matches['winner_seed'] == 3) & (matches['winner_rank'] > 40))]
print(wmatches[['tourney_name','tourney_date','winner_name', 'winner_rank', 'winner_seed']].to_csv(sys.stdout,index=False))
lmatches = matches[(matches['round'] == 'R16') & (matches['loser_seed'] == 3) & (matches['loser_rank'] > 40)]
print(lmatches[['tourney_name','tourney_date','loser_name', 'loser_rank', 'loser_seed']].to_csv(sys.stdout,index=False))
def qualifierSeeded(atpmatches):
"""finds qualifiers, which were seeded"""
lmatches = atpmatches[((atpmatches['loser_entry'] == 'Q') & (atpmatches['loser_seed'] < 9))]
lmatches = lmatches.rename(columns={'loser_entry': 'entry', 'loser_seed': 'seed', 'loser_name': 'name'})
wmatches = atpmatches[((atpmatches['winner_entry'] == 'Q') & (atpmatches['winner_seed'] < 9) & (atpmatches['round'] == 'F'))]
wmatches = wmatches.rename(columns={'winner_entry': 'entry', 'winner_seed': 'seed', 'winner_name': 'name'})
frames = [lmatches, wmatches]
result = pd.concat(frames)
result['seed'] = result['seed'].astype('int')
result = result.sort(['tourney_date','tourney_name'], ascending=[True,True])
print(result[['tourney_date','tourney_name','name','entry','seed','round']].to_csv(sys.stdout,index=False))
def getDictEightSeedRankperTourney(matches):
"""util function which returns a dictionary containing ranks of 8 seeds per tournament"""
#max_seed for ATP = 32 + buffer
#max_seed for CH = 8 + buffer
MAX_SEED = 32
tgroup = matches.groupby('tourney_id')
rankdict = {}
rankdict['id'] = {}
rankdict['altid'] = {}
for tid, t in tgroup:
#print t
found = False
for n in range(MAX_SEED,6,-1):
if (len(t[(t['loser_seed'] == n)]) > 0):
tr = t[(t['loser_seed'] == n)]
eightrank = tr.iloc[[0]]['loser_rank']
eightrank = eightrank.values[0]
found = True
elif (len(t[(t['winner_seed'] == n)]) > 0):
tr = t[(t['winner_seed'] == n)]
eightrank = tr.iloc[[0]]['winner_seed']
eightrank = eightrank.values[0]
found = True
if (found):
#print 'added ' + str(tid)
rankdict[tid] = eightrank
tname = tr.iloc[[0]]['tourney_name'].values[0]
tyear = tr.iloc[[0]]['tourney_date'].values[0]
rankdict['id'][tid] = eightrank
altid = str(tname)+str(tyear)[:4]
#print 'added ' + str(altid)
rankdict['altid'][altid] = eightrank
break
return rankdict
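#(descriptive note, not in the original) besides the bare tourney_id keys, the
#returned dict exposes two lookup views on the same data:
#  rankdict['id'][tourney_id]            -> rank of the tournament's last seed
#  rankdict['altid'][tourney_name+year]  -> rank of the tournament's last seed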
def highRankedQLosers(qmatches,atpmatches):
"""finds high ranked losers of qualification draws"""
amatches = atpmatches[((atpmatches['tourney_level'] == 'A') & (atpmatches['tourney_date'] > 20000000))]
qmatches = qmatches[((qmatches['tourney_level'] == 'Q') & (qmatches['tourney_date'] > 20000000))]
rankdict = getDictEightSeedRankperTourney(amatches)
rankdict = rankdict['altid']
rankdf = pd.DataFrame(list(rankdict.items()),columns=['id','8seedrank'])
rankdf['year'] = rankdf.id.str[-4:]
rankdf['year'] = rankdf['year'].astype(int)
rankdf['tourney_name'] = rankdf.id.str[:-4] + ' Q'
qmatches['date4'] = (qmatches['tourney_date']/10000).astype(int)
merged = rankdf.merge(qmatches, left_on=['year', 'tourney_name'], right_on=['date4', 'tourney_name'])
merged = merged[(merged['loser_rank'] < merged['8seedrank'])]
print(merged[['tourney_id','tourney_date','tourney_name','loser_name','loser_rank', '8seedrank']].sort(['tourney_date'], ascending=[True]).to_csv(sys.stdout,index=False))
def fedR4WimbiTime(atpmatches):
"""shows the time federer spent on court until 4th rounds in wimbledon"""
atpmatches = atpmatches[(atpmatches['tourney_name'] == 'Wimbledon')]
atpmatches = atpmatches[(atpmatches['winner_name'] == 'Roger Federer')]
atpmatches = atpmatches[(atpmatches['round'] == 'R128') | (atpmatches['round'] == 'R64') | (atpmatches['round'] == 'R32') | (atpmatches['round'] == 'R16')]
matchesgroup = atpmatches.groupby('tourney_id')
print(matchesgroup['minutes'].sum())
def youngFutures(matches):
"""finds young futures players
set round and age parameter in next line"""
matches = matches[(matches['round'] == 'QF') & (matches['winner_age'] < 16)]
matches = matches.sort(['winner_age'], ascending=True)
print(matches[['tourney_name','tourney_date','winner_name', 'winner_age', 'loser_name']].to_csv(sys.stdout,index=False))
def rankofQhigherthanlastSeed(matches):
"""find players and tournaments where the rank of the 1-seeded player in the qualies-draw
was higher than the last seed of the main draw"""
#if input is challenger use next line
#matches = matches[((matches['tourney_level'] == 'C') & (matches['tourney_date'] > 20000000))]
#if input is atp tour use next line
matches = matches[((matches['tourney_level'] == 'A') & (matches['tourney_date'] > 20100000))]
rankdict = getDictEightSeedRankperTourney(matches)
rankdict = rankdict['id']
results = {}
matches = matches[((matches['winner_entry'] == 'Q') | (matches['loser_entry'] == 'Q'))]
qgroup = matches.groupby('tourney_id')
for tid, t in qgroup:
#winner = q
for index, match in t[(t['winner_entry'] == 'Q')].iterrows():
try:
if (match['winner_rank'] < rankdict[tid]):
if (tid in results):
if (match['winner_name'] not in results[tid]['players']):
results[tid]['players'].append(match['winner_name'])
else:
results[tid] = {}
results[tid]['id'] = match['tourney_id']
results[tid]['name'] = match['tourney_name']
results[tid]['date'] = match['tourney_date']
results[tid]['players'] = []
results[tid]['players'].append(match['winner_name'])
except KeyError:
continue
#loser = q
for index, match in t[(t['loser_entry'] == 'Q')].iterrows():
try:
if (match['loser_rank'] < rankdict[tid]):
if (tid in results):
if (match['loser_name'] not in results[tid]['players']):
results[tid]['players'].append(match['loser_name'])
else:
results[tid] = {}
results[tid]['id'] = match['tourney_id']
results[tid]['name'] = match['tourney_name']
results[tid]['date'] = match['tourney_date']
results[tid]['players'] = []
results[tid]['players'].append(match['loser_name'])
except KeyError:
continue
orderedRes = (OrderedDict(sorted(results.items(), key=lambda x: x[1]['date'])))
for t in orderedRes:
playerstring = ','.join(orderedRes[t]['players'])
yearid = t.split('-')
year = yearid[0]
id = yearid[1]
md = 'http://www.atpworldtour.com/posting/'+str(year)+'/'+str(id)+'/mds.pdf'
qd = 'http://www.atpworldtour.com/posting/'+str(year)+'/'+str(id)+'/qs.pdf'
print(str(orderedRes[t]['date'])+','+orderedRes[t]['name']+',' + playerstring + ','+md+','+qd)
def avglastseedrank(matches):
"""calculates the average of the last seed rank per tournament category"""
#only matches from 2013 and 2014
matches = matches[(matches['tourney_date'] > datetime.date(2012,12,29)) & (matches['tourney_date'] < datetime.date(2015,1,1))]
#atp 500
#if draw size = 32, then 8 seeds
#if draw size = 48, then 16 seeds
#if draw size = 56, then 16 seeds
tourney500names = ['Rotterdam', 'Rio de Janeiro', 'Acapulco', 'Dubai', 'Barcelona', 'Hamburg', 'Washington', 'Beijing', 'Tokyo', 'Valencia', 'Basel', 'Memphis']
matches500 = matches[matches['tourney_name'].isin(tourney500names)]
#remove 2014-402 (= memphis) because in 2014 it was a 250
matches500 = matches500[(matches500['tourney_id'] != '2014-402')]
matches500group = matches500.groupby('tourney_id')
print("500============")
getLastSeedRankForGroupedTourneys(matches500group)
#atp 1000
#if draw size = 48, then 16 seeds
#if draw size = 56, then 16 seeds
#if draw size = 96, then 32 seeds
matches1000 = matches[(matches['tourney_level'] == 'M')]
matches1000group = matches1000.groupby('tourney_id')
print("1000============")
getLastSeedRankForGroupedTourneys(matches1000group)
#atp 250
#if draw size = 28, then 8 seeds
#if draw size = 32, then 8 seeds
#if draw size = 48, then 16 seeds
memphis2014 = matches[(matches['tourney_id'] == '2014-402')]
matches250 = matches[(matches['tourney_level'] == 'A')]
matches250 = matches250[~matches250['tourney_name'].isin(tourney500names)]
#add memphis for 2014, because it became a 250
matches250 = pd.concat([matches250, memphis2014])
matches250group = matches250.groupby('tourney_id')
print("250============")
getLastSeedRankForGroupedTourneys(matches250group)
#Grand Slam
#if draw size = 128, then 32 seeds
matchesG = matches[(matches['tourney_level'] == 'G')]
matchesGgroup = matchesG.groupby('tourney_id')
print("GS============")
getLastSeedRankForGroupedTourneys(matchesGgroup)
def rankingPointsOfYoungsters(players,ranks):
"""calculates ranking points of young players to answer questions like
"how many ranking points did players younger than 16y have?"""
players.columns = ['id', 'fname', 'lname', 'hand', 'dob', 'country']
players['full_name'] = players['fname'] + ' ' + players['lname']
ranks.columns = ['ranking_date', 'rank', 'player_id', 'points']
join = pd.merge(ranks,players, left_on='player_id', right_on='id')
join['age'] = join['ranking_date'] - join['dob']
join = join[join['age'] > datetime.timedelta(days = 0)]
join = join[(join['age'] < datetime.timedelta(days = 5875))]
#join['readAge'] = [parse_date(ranking_date - dob) for ranking_date, dob in zip(join["ranking_date"], join["dob"])]
join['readAge'] = join.apply(yearmonthdiff, axis=1)
join['points'] = join['points'].astype('int')
join = join[(join['points'] > 5)]
join = join.groupby('full_name', as_index=False).apply(lambda g: g.loc[g.age.idxmin()])
join = join.sort(['age'], ascending=False)
print(join[['ranking_date','dob', 'points','rank','readAge', 'full_name', 'country']].to_csv(sys.stdout,index=False))
def getLastSeedRankForGroupedTourneysDeprecated(groupedmatches):
"""DEPRECATED: returns the rank of the last seed for a give tournament"""
resultlist = []
resultlist8 = []
resultlist16 = []
resultlist32 = []
for tid, tmatches in groupedmatches:
print(tid + ' - ' + str(len(tmatches)) + ' - ' + tmatches.iloc[[0]]['tourney_name'].values[0])
if ((len(tmatches) == 55) | (len(tmatches) == 47)):
maxseed = 16
#look for ranking of 16 seed
elif ((len(tmatches) == 31) | (len(tmatches) == 27)):
#look for ranking of 8 seed
maxseed = 8
elif ((len(tmatches) == 95) | (len(tmatches) == 127)) :
#look for ranking of 32 seed
maxseed = 32
try:
tempmatches = tmatches[(tmatches['loser_seed'] == maxseed)]
if (len(tempmatches) == 1):
rank = tempmatches.iloc[[0]]['loser_rank'].values[0]
elif (len(tempmatches) < 1):
tempmatches = tmatches[(tmatches['winner_seed'] == maxseed)]
rank = tempmatches.iloc[[0]]['winner_rank'].values[0]
print(rank)
resultlist.append(rank)
if (maxseed == 8):
resultlist8.append(rank)
elif (maxseed == 16):
resultlist16.append(rank)
elif (maxseed == 32):
resultlist32.append(rank)
except:
continue
print("Overall:")
print(resultlist)
resultlist = np.asarray(resultlist)
print("Mean : {0:8.2f}".format(resultlist.mean()))
print("Minimum : {0:8.0f}".format(resultlist.min()))
print("Maximum : {0:8.0f}".format(resultlist.max()))
print("Std. deviation : {0:8.2f}".format(resultlist.std()))
if (len(resultlist8) > 0):
print("8 Seeds:")
print(resultlist8)
resultlist8 = np.asarray(resultlist8)
print("Mean : {0:8.2f}".format(resultlist8.mean()))
print("Minimum : {0:8.0f}".format(resultlist8.min()))
print("Maximum : {0:8.0f}".format(resultlist8.max()))
print("Std. deviation : {0:8.2f}".format(resultlist8.std()))
if (len(resultlist16) > 0):
print(resultlist16)
resultlist16 = np.asarray(resultlist16)
print("16 Seeds:")
print("Mean : {0:8.2f}".format(resultlist16.mean()))
print("Minimum : {0:8.0f}".format(resultlist16.min()))
print("Maximum : {0:8.0f}".format(resultlist16.max()))
print("Std. deviation : {0:8.2f}".format(resultlist16.std()))
if (len(resultlist32) > 0):
print("32 Seeds:")
print(resultlist32)
resultlist32 = np.asarray(resultlist32)
print("Mean : {0:8.2f}".format(resultlist32.mean()))
print("Minimum : {0:8.0f}".format(resultlist32.min()))
print("Maximum : {0:8.0f}".format(resultlist32.max()))
print("Std. deviation : {0:8.2f}".format(resultlist32.std()))
def getLastSeedRankForGroupedTourneys(groupedmatches):
"""returns the rank of the last seed for a give tournament"""
global joinedrankingsdf
#read rankings
dirname = ".."
ranking10s = dirname + "/atp_rankings_10s.csv"
playersDB = dirname + "/atp_players.csv"
rankingdf = pd.DataFrame()
playersdf = pd.DataFrame()
rankingdf = pd.read_csv(ranking10s,index_col=None,header=None,
parse_dates=[0],
date_parser=lambda t:parse(t))
rankingdf.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None)
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
joinedrankingsdf = pd.merge(rankingdf,playersdf, on='id')
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
resultlist = []
resultlist8 = []
resultlist16 = []
resultlist32 = []
for tid, tmatches in groupedmatches:
#if (tid == '2013-404'):
print(tid + ' - ' + str(len(tmatches)) + ' - ' + tmatches.iloc[[0]]['tourney_name'].values[0])
if ((len(tmatches) == 55) | (len(tmatches) == 47)):
maxseed = 16
#look for ranking of 16 seed
elif ((len(tmatches) == 31) | (len(tmatches) == 27)):
#look for ranking of 8 seed
maxseed = 8
elif ((len(tmatches) == 95) | (len(tmatches) == 127)) :
#look for ranking of 32 seed
maxseed = 32
try:
tempmatches = tmatches[(tmatches['loser_seed'] == maxseed)]
if (len(tempmatches) == 1):
#rank = tempmatches.iloc[[0]]['loser_rank'].values[0]
playername = tempmatches.iloc[[0]]['loser_name'].values[0]
tdate = tempmatches.iloc[[0]]['tourney_date'].values[0]
#try previous mondays and if found we are fine.
rank = getRankForPreviousMonday(tdate,playername)
elif (len(tempmatches) < 1):
tempmatches = tmatches[(tmatches['winner_seed'] == maxseed)]
#rank = tempmatches.iloc[[0]]['winner_rank'].values[0]
playername = tempmatches.iloc[[0]]['winner_name'].values[0]
tdate = tempmatches.iloc[[0]]['tourney_date'].values[0]
#try previous mondays
rank = getRankForPreviousMonday(tdate,playername)
print(rank)
resultlist.append(rank)
if (maxseed == 8):
resultlist8.append(rank)
elif (maxseed == 16):
resultlist16.append(rank)
elif (maxseed == 32):
resultlist32.append(rank)
except Exception as e:
s = str(e)
print(e)
print("Exception likely due to last seed having withdrawn. So we ignore this and it's fine!")
continue
print("Overall:")
print(resultlist)
resultlist = np.asarray(resultlist)
print("Mean : {0:8.2f}".format(resultlist.mean()))
print("Minimum : {0:8.0f}".format(resultlist.min()))
print("Maximum : {0:8.0f}".format(resultlist.max()))
print("Std. deviation : {0:8.2f}".format(resultlist.std()))
if (len(resultlist8) > 0):
print("8 Seeds:")
print(resultlist8)
resultlist8 = np.asarray(resultlist8)
print("Mean : {0:8.2f}".format(resultlist8.mean()))
print("Minimum : {0:8.0f}".format(resultlist8.min()))
print("Maximum : {0:8.0f}".format(resultlist8.max()))
print("Std. deviation : {0:8.2f}".format(resultlist8.std()))
if (len(resultlist16) > 0):
print(resultlist16)
resultlist16 = np.asarray(resultlist16)
print("16 Seeds:")
print("Mean : {0:8.2f}".format(resultlist16.mean()))
print("Minimum : {0:8.0f}".format(resultlist16.min()))
print("Maximum : {0:8.0f}".format(resultlist16.max()))
print("Std. deviation : {0:8.2f}".format(resultlist16.std()))
if (len(resultlist32) > 0):
print("32 Seeds:")
print(resultlist32)
resultlist32 = np.asarray(resultlist32)
print("Mean : {0:8.2f}".format(resultlist32.mean()))
print("Minimum : {0:8.0f}".format(resultlist32.min()))
print("Maximum : {0:8.0f}".format(resultlist32.max()))
print("Std. deviation : {0:8.2f}".format(resultlist32.std()))
def getBestQGrandSlamPlayer(qmatches,rankings):
"""returns highgest ranked players in grand slame quali-draws in order to find out the best cutoff for grand slam qualies"""
global joinedrankingsdf
#join rankings with playernames
dirname = ".."
playersDB = dirname + "/atp_players.csv"
rankings.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None)
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
joinedrankingsdf = pd.merge(rankings,playersdf, on='id')
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
qmatches = qmatches[(qmatches['tourney_name'] == 'Australian Open Q') | (qmatches['tourney_name'] == 'Roland Garros Q') | (qmatches['tourney_name'] == 'US Open Q') | (qmatches['tourney_name'] == 'Wimbledon Q')]
matchesgroup = qmatches.groupby('tourney_id')
res = {}
for tid, tmatches in matchesgroup:
name = tid[:4] + '-' + tmatches.iloc[[0]]['tourney_name'].values[0]
print(name)
#get all players into a set
w_set = set(tmatches['winner_name'])
l_set = set(tmatches['loser_name'])
#u_set contains all names of participating players
u_set = w_set.union(l_set)
#get ranking date
tdate = tmatches.iloc[[0]]['tourney_date'].values[0]
#q deadline is 4 weeks earlier
#deadline_date = tdate - datetime.timedelta(days = 28)
#alternatively take 6 weeks earlier deadline (= md deadline)
deadline_date = tdate - datetime.timedelta(days = 42)
if (deadline_date.weekday() == 6):
deadline_date = deadline_date + datetime.timedelta(days = 1)
#get rankings for each player in the set for deadline_date
player_list = list(u_set)
plist_df = pd.DataFrame(player_list)
plist_df.columns = ['fullname']
plist_df['entry_date'] = deadline_date
merged = plist_df.merge(joinedrankingsdf, left_on=['fullname', 'entry_date'], right_on=['fullname', 'date'])
merged = merged.sort(['rank'], ascending=True)
#print(merged[['fullname', 'rank']].head(1))
print(merged[['fullname', 'rank']].head(1).to_csv(sys.stdout, header=False, index=False))
fullname = merged.head(1).iloc[[0]]['fullname'].values[0]
rank = merged.head(1).iloc[[0]]['rank'].values[0]
res[name] = [fullname , rank]
for key, value in sorted(res.items(), key=lambda e: e[1][1]):
print(key+','+value[0]+','+str(value[1]))
def getAces(matches):
"""find matches where a player hit many aces.
define the threshold in the next line"""
matches = matches[((matches['w_ace'] > 45) | (matches['l_ace'] > 45))]
print(matches[['tourney_date','tourney_name','winner_name','loser_name','w_ace','l_ace', 'score']].to_csv(sys.stdout,index=False))
def getShortestFiveSetter(matches):
"""finds short 5 set matches.
define your own thresholds by changing the values in the line after the next."""
matches['score'].astype('str')
matches = matches[(matches['minutes'] < 150) & (matches['score'].str.count('-') == 5)]
matches = matches.sort(['minutes'], ascending=True)
print(matches[['minutes','score','tourney_name','tourney_date','round','winner_name', 'loser_name']].to_csv(sys.stdout,index=False))
def getworstlda(matches):
"""find the worst tournaments in terms of 'last direct acceptance'"""
global joinedrankingsdf
#join rankings with playernames
dirname = ".."
playersDB = dirname + "/atp_players.csv"
rankings.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None)
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
joinedrankingsdf = pd.merge(rankings,playersdf, on='id')
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
matches = matches[((matches['tourney_level'] == 'A') & (matches['tourney_date'] > datetime.date(2007,1,1)) & ((matches['draw_size'] == 32) | (matches['draw_size'] == 28)))]
tgroup = matches.groupby('tourney_id')
res = {}
for tid, tmatches in tgroup:
name = tid + '-' + tmatches.iloc[[0]]['tourney_name'].values[0]
print(name)
#get all players into a set
w_set = set(tmatches[(tmatches['winner_entry'] != 'Q') & (tmatches['winner_entry'] != 'WC') & (tmatches['winner_entry'] != 'LL')]['winner_name'])
l_set = set(tmatches[(tmatches['loser_entry'] != 'Q') & (tmatches['loser_entry'] != 'WC') & (tmatches['loser_entry'] != 'LL')]['loser_name'])
#u_set contains all names of participating players
u_set = w_set.union(l_set)
#get ranking date
tdate = tmatches.iloc[[0]]['tourney_date'].values[0]
deadline_date = tdate - datetime.timedelta(days = 42)
#print(deadline_date.weekday())
if (deadline_date.weekday() == 6):
deadline_date = deadline_date + datetime.timedelta(days = 1)
print(deadline_date)
for x in range(0, 3):
deadline_date = deadline_date - datetime.timedelta(days = 7*x)
if (len(joinedrankingsdf[(joinedrankingsdf['date'] == deadline_date)]) > 0):
print("gefunden")
print(deadline_date)
break
else:
continue
#get rankings for each player in the set for deadline_date
player_list = list(u_set)
plist_df = pd.DataFrame(player_list)
plist_df.columns = ['fullname']
plist_df['entry_date'] = deadline_date
merged = plist_df.merge(joinedrankingsdf, left_on=['fullname', 'entry_date'], right_on=['fullname', 'date'])
merged = merged.sort(['rank'], ascending=False)
print(merged[['fullname', 'rank']].head(1).to_csv(sys.stdout, header=False, index=False))
try:
fullname = merged.head(1).iloc[[0]]['fullname'].values[0]
rank = merged.head(1).iloc[[0]]['rank'].values[0]
res[name] = [fullname , rank]
except IndexError:
continue
for key, value in sorted(res.items(), key=lambda e: e[1][1]):
print(key+','+value[0]+','+str(value[1]))
def getCountriesPerTournament(matches):
"""returns tournaments where many players of the same country participated.
currently the function operates on challenger matches.
parameters in the next line may be changed to do the same for ATP tour level matches (no guarantee that it works without any further modifications)"""
matches = matches[(matches['tourney_level'] == 'C') & (matches['round'] == 'R32')]
tgroup = matches.groupby('tourney_id')
res = {}
for tid, tmatches in tgroup:
name = tid + '-' + tmatches.iloc[[0]]['tourney_name'].values[0]
#print(name)
#get all winner countries into a set
w_list = list(tmatches['winner_ioc'])
l_list = list(tmatches['loser_ioc'])
u_list = w_list + l_list
top = collections.Counter(u_list).most_common(1)
res[name] = [top[0][0], top[0][1]]
#print(res)
#u_set contains all names of participating players
#u_set = w_set.union(l_set)
for key, value in sorted(res.items(), key=lambda e: e[1][1], reverse=True):
print(key+','+value[0]+','+str(value[1]))
def getRetsPerPlayer(atpmatches,qmatches,fmatches, activeplayers, active):
"""returns the retirements per player over his career"""
allmatcheslist = []
allmatcheslist.append(atpmatches)
allmatcheslist.append(qmatches)
allmatcheslist.append(fmatches)
allmatches = pd.concat(allmatcheslist)
allmatchesret_group = allmatches[allmatches['score'].str.contains('RET').fillna(False)].groupby('loser_name').size().order(ascending=False)
allmatcheslost_group = allmatches.groupby('loser_name').size().order(ascending=False)
allmatcheslost_group = allmatcheslost_group[allmatcheslost_group > 49]
merged = pd.concat([allmatcheslost_group,allmatchesret_group], axis=1, join = 'inner').reset_index().fillna(0)
merged.columns = ['name', 'losses' , 'ret']
merged['percentage'] = (merged['ret'] * 100 / merged['losses']).round(2)
merged = merged.sort(['percentage','losses'], ascending=False)
if (active):
activeplayerslist = [row[0] for row in activeplayers]
merged = merged[(merged['name'].isin(activeplayerslist))]
print(merged.to_csv(sys.stdout,index=False))
def youngestChallengerWinners(matches):
"""returns youngest challenger match winners"""
matches = matches[(matches['tourney_level'] == 'C') & (matches['round'] == "R32") & (matches['winner_age'] < 17)]
matches_grouped = matches.groupby('winner_name', as_index=False).apply(lambda g: g.loc[g.winner_age.idxmin()])
matches_grouped['winner_age'] = matches_grouped['winner_age'].round(1)
print(type(matches_grouped['winner_rank'][0]))
matches_grouped['loser_rank'] = matches_grouped['loser_rank'].fillna(0.0).astype('int')
matches_grouped['winner_rank'] = matches_grouped['winner_rank'].fillna(0.0).astype('int')
print(matches_grouped[['tourney_date','tourney_name','winner_name','winner_age', 'winner_rank', 'loser_name', 'loser_rank', 'loser_entry']].sort(['winner_age'], ascending=[True]).to_csv(sys.stdout,index=False, sep='\t'))
def getStreaks(atpmatches):
"""detects streaks in players' careers.
in the next lines some parameters can be changed."""
#how many wins allowed during the streak? only tested for 0 and 1.
#GAPS_ALLOWED=1
GAPS_ALLOWED=0
#max rank of player when the streak was started
#MAX_RANK=20
MAX_RANK=2000
#define streak length
MIN_STREAK_LENGTH = 20
#winning streak or losing streak?
#WINS = False
WINS = True
if (WINS):
NAME_COLUMN = 'winner_name'
RANK_COLUMN = 'winner_rank'
else:
NAME_COLUMN = 'loser_name'
RANK_COLUMN = 'loser_rank'
#change tourney_level in next line!
#atpmatches = atpmatches[(atpmatches['tourney_date'] >= 19900000) & (atpmatches['tourney_level'] != 'D')]
atpmatches = atpmatches[(atpmatches['tourney_date'] >= 19900000) & (atpmatches['tourney_level'] == 'S')]
#counting wins and losses (and adding as a column) for each player (for filter later to increase speed of algorithm)
atpmatches['wfreq'] = atpmatches.groupby('winner_name')['winner_name'].transform('count')
atpmatches['lfreq'] = atpmatches.groupby('loser_name')['loser_name'].transform('count')
#only include players with more than 30 wins and 10 losses (to increase algorithm speed by reducing number of players)
wplayers = atpmatches[atpmatches['wfreq'] > 30]['winner_name'].tolist()
lplayers = atpmatches[atpmatches['lfreq'] > 10]['loser_name'].tolist()
players = set(wplayers+lplayers)
streaks = []
for player in players:
playerFullName = player
matches = atpmatches[((atpmatches['winner_name'] == playerFullName) | (atpmatches['loser_name'] == playerFullName))]
matches['round'] = pd.Categorical(matches['round'], categories = ["RR", "R128", "R64", "R32", "R16", "QF", "SF", "F", "W"])
matches = matches.sort(['tourney_date', 'round'])
wins_cnt=0
streak = 0
for i in range(len(matches)):
#get losing streak
if (matches.iloc[i][NAME_COLUMN] == playerFullName):
if (streak == 0):
startdate = matches.iloc[i]['tourney_date']
startrank = matches.iloc[i][RANK_COLUMN]
streak = streak+1
else:
#win
wins_cnt=wins_cnt+1
if (wins_cnt==1):
win_pos = i+1
if (wins_cnt > GAPS_ALLOWED):
if (streak > MIN_STREAK_LENGTH):
streaks.append([playerFullName,startdate,startrank,streak,GAPS_ALLOWED])
streak = 0
wins_cnt=0
i = win_pos+1
#old version of the function
# for index, match in matches.iterrows():
# #get losing streak
# if (match['loser_name'] == playerFullName):
# if (streak == 0):
# startdate = match['tourney_date']
# startrank = match['loser_rank']
# streak = streak+1
# tempmatch = match
# continue
# else:
# #save streak because of win
# if (streak > 6):
# streaks.append([playerFullName,startdate,startrank,streak])
# streak = 0
# continue
#to get ongoing streaks or players who ended career with a losing streak
if (streak > MIN_STREAK_LENGTH):
streaks.append([playerFullName,startdate,startrank,streak,GAPS_ALLOWED])
streak = 0
#do some styling (for streak-starts where we dont have a ranking (possibly due to WC awarded) we enter 9999 as a streak-ranking-start
#so in order to include them MAX_RANK needs to be set accordingly
streaks = [[row[0],row[1],9999 if math.isnan(row[2]) else int(row[2]),row[3],row[4]] for row in streaks]
#sort by date first because it's secondary sort index
streaks.sort(key=itemgetter(1),reverse=True)
#sort by streak length
streaks.sort(key=itemgetter(3),reverse=True)
streaks = [y for y in streaks if int(y[2]) <= MAX_RANK]
for streak in streaks:
print(streak[0]+','+str(streak[1])+','+str(streak[2])+','+str(streak[3])+','+str(streak[4]))
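#(illustrative) every printed line has the form
#  player,streak_start_date,rank_at_streak_start,streak_length,gaps_allowed
#e.g. getStreaks(readFMatches("..")) would, with the current tourney_level == 'S'
#filter, list all qualifying streaks on the futures tour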
def get1seedWinners(matches):
"""calculates how often the first seed won an ATP tournament"""
fmatches = matches[(matches['tourney_level'] == 'A') & (matches['round'] == 'F') & (matches['tourney_date'] >= 19910000)]
wseed1matches = fmatches[(fmatches['winner_seed'] == 1)]
print(len(fmatches))
print(len(wseed1matches))
matches = matches[(matches['tourney_level'] == 'A') & (matches['tourney_date'] >= 19910000)]
cntfirstseedstten = 0
firstseedsttenwinner = 0
cntfirstseedgtten = 0
firstseedgttenwinner = 0
cntfirstseedgttwenty = 0
firstseedgttwentywinner = 0
tourney_group = matches.groupby('tourney_id')
for tname, tdf in tourney_group:
#print(tname)
        if (len(tdf[(tdf['winner_seed'] == 1)]) > 0):
            firstseedrank = tdf[(tdf['winner_seed'] == 1)].iloc[0]['winner_rank']
        elif (len(tdf[(tdf['loser_seed'] == 1)]) > 0):
firstseedrank = tdf[(tdf['loser_seed'] == 1)].iloc[0]['loser_rank']
if not (math.isnan(firstseedrank)):
if (firstseedrank < 11):
cntfirstseedstten+=1
if (len(tdf[(tdf['winner_seed'] == 1) & (tdf['round'] == 'F')] > 0)):
firstseedsttenwinner+=1
if (firstseedrank > 10):
cntfirstseedgtten+=1
if (len(tdf[(tdf['winner_seed'] == 1) & (tdf['round'] == 'F')] > 0)):
firstseedgttenwinner+=1
if (firstseedrank > 20):
cntfirstseedgttwenty +=1
if (len(tdf[(tdf['winner_seed'] == 1) & (tdf['round'] == 'F')] > 0)):
firstseedgttwentywinner+=1
print('cntfirstseedstten: ' + str(cntfirstseedstten))
print('firstseedsttenwinner: ' + str(firstseedsttenwinner))
print('cntfirstseedgtten: ' + str(cntfirstseedgtten))
print('firstseedgttenwinner: ' + str(firstseedgttenwinner))
print('cntfirstseedgttwenty: ' + str(cntfirstseedgttwenty))
print('firstseedgttwentywinner: ' + str(firstseedgttwentywinner))
def getseedWinners(matches):
"""calculates how often the seeds won an ATP tournament"""
groundmatches = matches[(matches['tourney_level'] == 'A') & (matches['round'] == 'F') & (matches['tourney_date'] >= 19910000) & ((matches['draw_size'] == 32) | (matches['draw_size'] == 28))]
wseed1matches = groundmatches[(groundmatches['winner_seed'] == 1)]
wseed2matches = groundmatches[(groundmatches['winner_seed'] == 2)]
wseed3matches = groundmatches[(groundmatches['winner_seed'] == 3)]
wseed4matches = groundmatches[(groundmatches['winner_seed'] == 4)]
wseed5matches = groundmatches[(groundmatches['winner_seed'] == 5)]
wseed6matches = groundmatches[(groundmatches['winner_seed'] == 6)]
wseed7matches = groundmatches[(groundmatches['winner_seed'] == 7)]
wseed8matches = groundmatches[(groundmatches['winner_seed'] == 8)]
wunseedseedmatches = groundmatches[(groundmatches['winner_seed'].isnull())]
print(len(groundmatches))
print('{} - {}{}'.format('1 seed', np.round(len(wseed1matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('2 seed', np.round(len(wseed2matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('3 seed', np.round(len(wseed3matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('4 seed', np.round(len(wseed4matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('5 seed', np.round(len(wseed5matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('6 seed', np.round(len(wseed6matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('7 seed', np.round(len(wseed7matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('8 seed', np.round(len(wseed8matches)*100/len(groundmatches), decimals=2),'%'))
print('{} - {}{}'.format('unseeded', np.round(len(wunseedseedmatches)*100/len(groundmatches), decimals=2),'%'))
print('------')
def highestRankedAustriansInR16(matches):
"""returns the highest ranked austrians in an R16 of an ATP tournament.
parameters in the next lines can be changed to make it work for different rounds and countries."""
smatches = matches[(matches['tourney_level'] == 'A') & (matches['round'] =='R32') & (matches['winner_ioc'] =='AUT') & (matches['winner_rank'] > 300) & ((matches['draw_size'] == 28) | (matches['draw_size'] == 32))]
bmatches = matches[(matches['tourney_level'] == 'A') & (matches['round'] =='R64') & (matches['winner_ioc'] =='AUT') & (matches['winner_rank'] > 300) & ((matches['draw_size'] == 56) | (matches['draw_size'] == 48) | (matches['draw_size'] == 64))]
mergelist = [smatches, bmatches]
matches = pd.concat(mergelist)
matches = matches.sort(['winner_rank'], ascending=False)
print(matches[['tourney_name','tourney_date','winner_name', 'winner_rank', 'loser_name', 'loser_rank', 'loser_entry']].to_csv(sys.stdout,index=False))
def mostRetsInTourneyPerPlayer(matches):
"""returns tournaments where a player benefitted of a RET or W.O. more than once"""
matches = matches[(matches['tourney_level'] == 'A') | (matches['tourney_level'] == 'M') | (matches['tourney_level'] == 'G')]
matches['score'].astype('str')
matches['count'] = matches.groupby(['tourney_id', 'winner_name'])['score'].transform(lambda x: x[(x.str.contains('RET|W').fillna(False))].count())
matches = matches[(matches['count'] > 1)]
print(matches[['tourney_name','tourney_date','winner_name', 'count']].drop_duplicates().to_csv(sys.stdout,index=False))
def mostWCs(matches):
"""finds players with most WCs"""
matches = matches[(matches['tourney_date'] > 20140000)]
matches = matches[(matches['tourney_level'] == 'A') | (matches['tourney_level'] == 'M')| (matches['tourney_level'] == 'G')]
matches = matches[(matches['winner_entry'] == 'WC') | (matches['loser_entry'] == 'WC')]
wcw_group = matches.groupby(['tourney_id', 'winner_name']).apply(lambda x: (x['winner_entry'] == 'WC').sum())
wcl_group = matches.groupby(['tourney_id', 'loser_name']).apply(lambda x: (x['loser_entry'] == 'WC').sum())
scores = pd.DataFrame({'wcw' : wcw_group, 'wcl': wcl_group}).fillna(0)
scores[['wcw', 'wcl']] = scores[['wcw', 'wcl']].astype(int)
scores.index.names = ['tourney','pname']
scores['wcs'] = scores['wcw'] + scores['wcl']
scores = scores[(scores['wcs'] > 0)]
scores = scores.groupby('pname')
# scores[['wins', 'losses', 'rets']] = scores[['wins', 'losses', 'rets']].astype(int)
# scores.index.names = ['year','pname']
# scores = scores[(scores['rets'] > 3)]
# scores['matches'] = scores['wins']+scores['losses']
# scores['percentage'] = np.round((scores['rets'] * 100 / scores['wins']).astype(np.double), decimals=2)
# scores = scores.reindex_axis(['matches', 'wins','losses','rets','percentage'], axis=1)
# scores = scores.sort(['rets'], ascending=False)
print(scores.to_csv(sys.stdout,index=True))
def mostRetsPerYear(matches):
"""finds players with most RETs received per year"""
matches = matches[(matches['tourney_level'] == 'A') | (matches['tourney_level'] == 'M')| (matches['tourney_level'] == 'G')]
matches['score'].astype('str')
matches['tourney_date'].apply(str)
matches['year'] = matches['tourney_date'].astype(str)
matches['year'] = matches['year'].str[:4]
w_group = matches.groupby(['year', 'winner_name']).size()
l_group = matches.groupby(['year', 'loser_name']).size()
#ret_group = matches.groupby(['year', 'winner_name']).apply(lambda x: (x['score'].str.contains('RET|W').fillna(False)).sum())
ret_group = matches.groupby(['year', 'winner_name']).apply(lambda x: (x['score'].str.contains('RET').fillna(False)).sum())
scores = pd.DataFrame({'wins' : w_group, 'losses' : l_group, 'rets' : ret_group}).fillna(0)
scores[['wins', 'losses', 'rets']] = scores[['wins', 'losses', 'rets']].astype(int)
scores.index.names = ['year','pname']
scores = scores[(scores['rets'] > 3)]
scores['matches'] = scores['wins']+scores['losses']
scores['percentage'] = np.round((scores['rets'] * 100 / scores['wins']).astype(np.double), decimals=2)
scores = scores.reindex_axis(['matches', 'wins','losses','rets','percentage'], axis=1)
scores = scores.sort(['rets'], ascending=False)
print(scores.to_csv(sys.stdout,index=True))
def oldestWinnerATP(atpmatches,qmatches):
"""returns tournaments with old match winners"""
mergelist = [atpmatches, qmatches]
matches = pd.concat(mergelist)
matches = matches[(matches['tourney_level'] == 'A') | (matches['tourney_level'] == 'Q') | (matches['tourney_level'] == 'M') | (matches['tourney_level'] == 'G')]
matches = matches[(matches['winner_age']) > 38]
#matches = matches[(matches['tourney_date']) > 19903000]
matches = matches.sort(['winner_age'], ascending=False)
print(matches[['tourney_name', 'tourney_date', 'round', 'winner_name', 'winner_age', 'loser_name', 'score']].drop_duplicates().to_csv(sys.stdout,index=False))
def bestNonChampion(players,ranks):
"""finds highest ranked players without a title"""
players.columns = ['id', 'fname', 'lname', 'hand', 'dob', 'country']
players['full_name'] = players['fname'] + ' ' + players['lname']
ranks.columns = ['ranking_date', 'rank', 'player_id', 'points']
ranks = ranks[(ranks['rank'] < 41)]
join = pd.merge(ranks,players, left_on='player_id', right_on='id')
join["fullname"] = join["fname"] + ' ' + join["lname"]
join = join[(join['ranking_date'] > datetime.date(1991, 1, 1))]
#join['tournament_wins'] = join.apply(lambda x: len(matches[(matches['tourney_date'] < x['ranking_date']) & (matches['winner_name'] == x['fullname'])]), axis=1)
join = join.groupby('fullname').apply(get_date_wins) #groupby to increase speed compared to previous line
join = join[(join['tournament_wins'] == 0)].sort(['rank'], ascending=True)
print(join[['fullname', 'ranking_date', 'rank', 'tournament_wins']].to_csv(sys.stdout,index=False))
def getZeroBreakPointChampions(atpmatches):
"""finds tournament winners who faces zero break points over the course of a tournament"""
matches = atpmatches[((atpmatches['tourney_level'] == 'A') | (atpmatches['tourney_level'] == 'G') | (atpmatches['tourney_level'] == 'M')) & (atpmatches['tourney_date'] >= 19910000)]
#matches = matches[(matches['tourney_id'] == '2015-891') | (matches['tourney_id'] == '2015-407')]
matches['w_broken'] = matches['w_bpFaced'] - matches['w_bpSaved']
matches = matches.reset_index().groupby('tourney_id').apply(get_winner_name)
matches = matches[(matches['winner_name'] == matches['twname'])]
matches['sum_broken'] = matches.groupby('tourney_id')['w_broken'].transform(np.sum)
matches = matches.sort(['sum_broken','tourney_date'], ascending=[True,False])
print(matches[['tourney_id', 'tourney_name', 'tourney_level', 'winner_name', 'sum_broken']].drop_duplicates().to_csv(sys.stdout,index=False))
def get_winner_name(in_group):
"""helper function"""
try:
wname = in_group[(in_group['round'] == 'F')].iloc[[0]]['winner_name'].values[0]
in_group['twname'] = wname
except IndexError:
in_group['twname'] = 'none'
return in_group
def easiestOpponents(atpmatches):
"""finds players who had the highest ranked opponents over a complete tournament"""
matches = atpmatches[(atpmatches['tourney_level'] == 'G')]
matches = matches[(matches['round'] == 'R128') | (matches['round'] == 'R64') | (matches['round'] == 'R32') | (matches['round'] == 'R16')]
#filter players who played against Q or WC (who potentially have high rankings)
#matches = matches[(matches['loser_entry'] != 'WC') & (matches['loser_entry'] != 'Q') & (matches['loser_entry'] != 'LL')]
matches = matches.groupby(['tourney_date','winner_name']).filter(lambda x: len(x) > 3)
matches['ranksum'] = matches.groupby(['tourney_date','winner_name'])['loser_rank'].transform(lambda x: x.sum())
matches = matches[(matches['ranksum'] > 450)]
matches = matches.sort(['tourney_date','winner_name'], ascending=True)
print(matches[['tourney_name','tourney_date','winner_name', 'round', 'loser_name', 'loser_rank', 'loser_entry', 'ranksum']].drop_duplicates().to_csv(sys.stdout,index=False))
def wcwinner(qmatches):
"""finds Q winners who were WCs"""
matches = qmatches[(qmatches['round'] == 'F') & (qmatches['winner_entry'] == 'WC')]
#filter out seeded WCs
matches = matches[(matches['winner_seed'].isnull())]
print(matches[['tourney_name','tourney_date','winner_name', 'winner_entry', 'winner_rank']].to_csv(sys.stdout,index=False))
def titlesataage(atpmatches):
"""calculates how many titles a player had at a certain age"""
matches = atpmatches[(atpmatches['round'] == 'F') & (atpmatches['winner_age'] < 22.5)]
matches['titles'] = matches.groupby('winner_name')['winner_name'].transform('count')
matches = matches[(matches['titles'] > 3)]
print(matches[['winner_name', 'titles']].drop_duplicates().to_csv(sys.stdout,index=False))
def get_streaks(x):
"""helper function"""
#x['streak'] = x.groupby( (x['l_breaks'] != 0).cumsum()).cumcount() + ( (x['l_breaks'] != 0).cumsum() == 0).astype(int)
x=x.reset_index()
for i, row in x.iterrows():
#print(i)
if i != 0:
if row['l_breaks'] == 0:
x.ix[i, 'streak'] = x.ix[i-1, 'streak'] + 1
else:
if row['l_breaks'] == 0:
x.ix[i, 'streak'] = 1
return x
def get_streaks2(df):
"""helper function"""
df=df.reset_index()
df['streak2'] = (df['l_breaks'] == 0).cumsum()
df['cumsum'] = np.nan
df.loc[df['l_breaks'] != 0, 'cumsum'] = df['streak2']
df['cumsum'] = df['cumsum'].fillna(method='ffill')
df['cumsum'] = df['cumsum'].fillna(0)
df['streak'] = df['streak2'] - df['cumsum']
df.drop(['streak2', 'cumsum'], axis=1, inplace=True)
return df
def consecutivlosseswithoutbreaks(atpmatches):
"""finds matches where players had consecutive losses without getting broken"""
#atpmatches = atpmatches[(atpmatches['loser_name'] == 'John Isner')]
atpmatches = atpmatches.sort('tourney_date')
atpmatches['l_breaks'] = atpmatches['l_bpFaced']-atpmatches['l_bpSaved']
atpmatches['streak'] = 0
atpmatches = atpmatches.groupby('loser_name').apply(get_streaks2)
atpmatches = atpmatches[(atpmatches['streak'] >1)]
atpmatches = atpmatches.sort('tourney_date')
print(atpmatches[['tourney_date', 'tourney_name','winner_name', 'loser_name', 'score', 'l_bpSaved', 'l_bpFaced', 'streak']].to_csv(sys.stdout,index=False))
def curse(row):
"""helper function"""
if row['previous_loser'] == 'Rafael Nadal':
if row['previous_winner'] == row['winner_name']:
val = 0
elif row['previous_winner'] != row['winner_name']:
val = 1
else:
val = -1
return val
def findnadals(group):
"""helper function"""
#print(group.iloc[[0]]['tourney_date'].values[0])
group = group.sort('rank')
group['previous_loser'] = group['loser_name'].shift(1)
group['previous_winner'] = group['winner_name'].shift(1)
group = group[(group['winner_name'] == 'Rafael Nadal') | (group['loser_name'] == 'Rafael Nadal') | (group['previous_loser'] == 'Rafael Nadal')]
group.loc[group['previous_loser'] != 'Rafael Nadal', 'previous_loser'] = ''
group = group[(group['previous_loser'] == 'Rafael Nadal') | ((group['loser_name'] == 'Rafael Nadal') & (group['round'] != 'F'))]
if (len(group) > 0):
group['curse'] = group.apply(curse, axis=1)
#print(group[['tourney_date', 'tourney_name', 'round', 'winner_name', 'loser_name', 'previous_loser', 'curse', 'score']].to_csv(sys.stdout,index=False))
#print('in group type: ' + str(type(group)))
return group
def losetonadalafterwin(atpmatches):
"""finds matches of players who lost to nadal after they beat him (nadal curse)"""
round_dict = { "R16": 9,
"W": 13,
"F": 12,
"RR": 8,
"R64": 6,
"R128": 5,
"QF": 10,
"SF": 11,
"R32": 7
};
atpmatches = atpmatches[(atpmatches['tourney_date'] > datetime.date(1995,1,1))]
w_set = set(atpmatches['winner_name'])
l_set = set(atpmatches['loser_name'])
namelist = w_set.union(l_set)
namelist = sorted(namelist)
namelist.remove('Novak Djokovic')
namelist.remove('Andy Murray')
namelist.remove('Roger Federer')
namelist.remove('Rafael Nadal')
#namelist = namelist.remove('Novak Djokovic'), 'Roger Federer', 'Andy Murray'])
#namelist = ['Borna Coric', 'Steve Darcis']
atpmatches['rank'] = atpmatches['round'].map(round_dict)
atpmatches = atpmatches.sort('tourney_date')
#get list of nadal tournaments
nadal_tourneys = atpmatches[(atpmatches['winner_name'] == 'Rafael Nadal') | (atpmatches['loser_name'] == 'Rafael Nadal')]['tourney_id']
nadal_tourneys = nadal_tourneys.tolist()
resultmatches = pd.DataFrame()
for name in namelist:
print(name)
matches = atpmatches[((atpmatches['winner_name'] == name) | (atpmatches['loser_name'] == name)) & atpmatches['tourney_id'].isin(nadal_tourneys)]
matches = matches.groupby(['tourney_id']).apply(findnadals)
resultmatches = resultmatches.append(matches)
resultmatches['curse'] = resultmatches['curse'].astype(int)
resultmatches = resultmatches.sort(['tourney_date', 'rank'], ascending=[True,True])
print(resultmatches[['tourney_date', 'tourney_name', 'round', 'winner_name', 'loser_name', 'previous_loser', 'curse', 'score']].drop_duplicates().to_csv(sys.stdout,index=False))
print(resultmatches[['tourney_date', 'tourney_name', 'round', 'winner_name', 'loser_name', 'curse', 'score']].drop_duplicates().to_csv(sys.stdout,index=False))
def countseeds(group):
"""helper function"""
group['cnt'] = len(group[(group['loser_seed'] < 6) | (group['winner_seed'] < 6)])
#print(len(group[(group['loser_seed'] < 6) | (group['winner_seed'] < 6)]))
#print('--')
return group
def fouroffiveseedsgone(atpmatches):
"""finds tournaments where four of five seeds lost until R16"""
matches = atpmatches[(atpmatches['tourney_level'] == 'M') & (atpmatches['round'] == 'R16')]
matches = matches.reset_index().groupby(['tourney_id']).apply(countseeds)
matches = matches[(matches['cnt'] < 2)]
matches['url'] = 'http://www.protennislive.com/posting/' +matches['tourney_date'].dt.strftime('%Y') + '/' + matches['tourney_id'].str[-3:] + '/mds.pdf'
print(matches[['tourney_date', 'tourney_name', 'cnt', 'url']].drop_duplicates().to_csv(sys.stdout,index=False))
def createOpponentCol(x,name):
"""helper function"""
if (x['winner_name'] == name):
return x['loser_name']
else:
return x['winner_name']
def createOpponent2Col(x,name):
"""helper function"""
if (x['winner_name'] == name):
return 1
else:
return 0
def lossStreaks(group):
"""helper function"""
group = group.sort('tourney_date')
group['streak2'] = (group['opponent_loss'] == 0).cumsum()
group['cumsum'] = np.nan
group.loc[group['opponent_loss'] == 1, 'cumsum'] = group['streak2']
group['cumsum'] = group['cumsum'].fillna(method='ffill')
group['cumsum'] = group['cumsum'].fillna(0)
group['streak'] = group['streak2'] - group['cumsum']
group['streak'] = group['streak'].astype('int')
group = group[group['streak'] != 0]
#group.drop(['streak2', 'cumsum'], axis=1, inplace=True)
group = group.groupby('cumsum').filter(lambda x: len(x) > 1)
if (len(group) > 1):
print(group[['tourney_date', 'tourney_name', 'winner_name', 'loser_name', 'score', 'streak']].to_csv(sys.stdout,index=False))
return group
def backtobacklosses(atpmatches,name):
"""finds back to back losses"""
matches=atpmatches[(atpmatches['winner_name'] == name) | (atpmatches['loser_name'] == name)]
matches['opponent_name'] = matches.apply(lambda x: createOpponentCol(x, name), axis=1)
matches['opponent_loss'] = matches.apply(lambda x: createOpponent2Col(x, name), axis=1)
#matches = matches[matches['opponent_name'] == 'Novak Djokovic']
matches = matches.reset_index().groupby('opponent_name').apply(lossStreaks)
def titlesdefended(atpmatches):
"""calculates how often titles were successfully defended"""
matches = atpmatches[(atpmatches['tourney_level'] == 'A')]
matches['rawid'] = matches['tourney_id'].str[5:]
matches1415 = matches[((matches['tourney_date'] < 20140000) & (matches['tourney_date'] > 20111200))]
matches1415['tourney_date'] = matches1415['tourney_date'].astype('str')
#TODO: check if winner of 2014 is also playing in 2015. if yes: keep tournament, if not, drop it!
matches1415 = matches1415.groupby(matches1415['rawid'])#.filter(lambda x: len(x['tourney_name']) == 2)
#print(len(matches1415))
defender_tourneys = matches1415.apply(tryingtodefend)
print(len(defender_tourneys))
defender_tourneys = defender_tourneys[defender_tourneys['del'] == 0]
print(len(defender_tourneys))
defender_tourneys = defender_tourneys[['rawid','triedtodefend','successfullydefended','madeituntil']]
print(len(defender_tourneys))
defender_tourneys = defender_tourneys.drop_duplicates()
print('{} {}'.format('tournaments:', len(defender_tourneys)))
print('{} {}'.format('triedtodefend:', len(defender_tourneys[defender_tourneys['triedtodefend'] == 1])))
print('{} {}'.format('successfullydefended:', len(defender_tourneys[defender_tourneys['successfullydefended'] == 1])))
print('{} {}'.format('% triedtodefend:', np.round(len(defender_tourneys[defender_tourneys['triedtodefend'] == 1])*100/len(defender_tourneys), 2)))
print('{} {}'.format('% successfullydefended of all tournaments:', np.round(len(defender_tourneys[defender_tourneys['successfullydefended'] == 1])*100/len(defender_tourneys), 2)))
print('{} {}'.format('% successfullydefended of tries:', np.round(len(defender_tourneys[defender_tourneys['successfullydefended'] == 1])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until F:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'F'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until SF:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'SF'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until QF:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'QF'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until R16:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'R16'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until R32:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'R32'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
print('{} {}'.format('until R64:', np.round(len(defender_tourneys[defender_tourneys['madeituntil'] == 'R64'])*100/len(defender_tourneys[defender_tourneys['triedtodefend'] == 1]), 2)))
#defender_tourneys.groupby['rawid']
#print(defender_tourneys[['rawid','tourney_id','triedtodefend','successfullydefended','madeituntil']].to_csv(sys.stdout,index=False))
#matches1415 = matches1415.groupby(matches1415['rawid']).apply(defending)
def tryingtodefend(group):
"""helper function"""
try:
#print(group.name)
#print(group.iloc[[0]]['tourney_name'])
#print(len(group))
#print('----')
group = group.sort('tourney_date', ascending=True)
group_copy = group[(group['round'] == 'F')]
#get new/old year
newer_year = str(group_copy.iloc[[len(group_copy)-1]]['tourney_date'].values[0])[:4]
older_year = str(group_copy.iloc[[0]]['tourney_date'].values[0])[:4]
oldwinnername = group_copy.iloc[[0]]['winner_name'].values[0]
tourneyname = group_copy.iloc[[0]]['tourney_name'].values[0]
# print(oldwinnername)
# print(older_year)
# print(newer_year)
# print(tourneyname)
# print(len(group_copy))
#del because tournament was only held in one year
if (len(group_copy) == 1):
group['del'] = 1
else:
group['del'] = 0
#delete big four
if ((oldwinnername == 'Rafael Nadal') | (oldwinnername == 'Andy Murray') | (oldwinnername == 'Novak Djokovic') | (oldwinnername == 'Roger Federer')):
group['del'] = 1
#does oldwinner play in new year?
newmatches = group[group['tourney_date'].str.startswith(newer_year) & ((group['loser_name'] == oldwinnername) | (group['winner_name'] == oldwinnername))]
if (len(newmatches) == 0):
group['triedtodefend'] = 0
group['successfullydefended'] = 0
group['madeituntil'] = 0
else:
group['triedtodefend'] = 1
#successfully defended?
if (len(newmatches[(newmatches['round'] == 'F') & (newmatches['winner_name'] == oldwinnername)]) > 0):
group['successfullydefended'] = 1
group['madeituntil'] = 'W'
else:
group['successfullydefended'] = 0
if (len(newmatches[(newmatches['round'] == 'F') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'F'
elif (len(newmatches[(newmatches['round'] == 'SF') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'SF'
elif (len(newmatches[(newmatches['round'] == 'QF') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'QF'
elif (len(newmatches[(newmatches['round'] == 'R16') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'R16'
elif (len(newmatches[(newmatches['round'] == 'R32') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'R32'
elif (len(newmatches[(newmatches['round'] == 'R64') & (newmatches['loser_name'] == oldwinnername)]) > 0):
group['madeituntil'] = 'R64'
#print(group[['tourney_id','tourney_name','triedtodefend','successfullydefended','madeituntil']].drop_duplicates().to_csv(sys.stdout,index=False))
#newwinnername = group_copy.iloc[[1]]['winner_name'].values[0]
#print(newwinnername)
#get list of all new players
#print(group[['tourney_date', 'round', 'winner_name', 'prev_winner_winner','prev_winner_runnerup', 'deftitle', 'defrunnerup']].to_csv(sys.stdout,index=False))
#print(group[['tourney_date', 'round', 'winner_name']].to_csv(sys.stdout,index=False))
return group
except IndexError:
group['include'] = False
return group
def defending(group):
"""helper function"""
#print(group.name)
#print(group.iloc[[0]]['tourney_name'])
#print(len(group))
print('----')
group = group.sort('tourney_date', ascending=True)
group['prev_winner_winner'] = group['winner_name'].shift()
group['prev_winner_runnerup'] = group['loser_name'].shift()
group['deftitle'] = group.apply(f, axis=1)
group['defrunnerup'] = group.apply(g, axis=1)
print(group[['rawid', 'winner_name', 'prev_winner_winner','prev_winner_runnerup', 'deftitle', 'defrunnerup']].to_csv(sys.stdout,index=False))
return group
def f(row):
"""helper function"""
if row['prev_winner_winner'] == row['winner_name']:
val = 1
else:
val = 0
return val
def g(row):
"""helper function"""
if row['prev_winner_runnerup'] == row['winner_name']:
val = 1
else:
val = 0
return val
def titlessurface(atpmatches):
"""calculates titles per surface"""
matches = atpmatches[atpmatches['round'] == 'F']
matches['year'] = matches.tourney_id.str[:4]
matches['tourney_date'] =matches['tourney_date'].astype('str')
matches['month'] = matches.tourney_date.str[4:6]
matches['month'] = matches['month'].astype('int')
matches = matches[matches['month'] < 7]
matches = matches.reset_index().groupby(['year','winner_name'])
surface_winners = matches.apply(calcsurfaces)
def calcsurfaces(group):
"""helper function"""
#print(group.iloc[[0]]['year'].values[0])
#print(group.iloc[[0]]['winner_name'].values[0])
#print(len(group[group['surface'] == "Clay"]))
#print(len(group[group['surface'] == "Grass"]))
#print(len(group[group['surface'] == "Hard"]))
if ((len(group[group['surface'] == "Clay"])) > 0) & ((len(group[group['surface'] == "Hard"])) > 0) & ((len(group[group['surface'] == "Grass"])) > 0):
print(group[['year','winner_name', 'surface','tourney_name', 'tourney_date']].to_csv(sys.stdout,index=False))
def matchesPerLastNameAndRound(matches):
"""finds matches of brothers"""
matches = matches[(matches['round']=='F') & (matches['tourney_date'] > 19900000)]
matches['winner_lastname'] = matches['winner_name'].str.split(' ').str[-1] + matches['winner_ioc']
matches['loser_lastname'] = matches['loser_name'].str.split(' ').str[-1] + matches['loser_ioc']
matches = matches.reset_index().groupby('tourney_id')
result = matches.apply(playernames)
result = result[(result['winner_brother'] == 1) | (result['loser_brother'] == 1)]
result2 = []
for index, row in result.iterrows():
output1=''
output2=''
if (row['winner_brother'] == 1) & (row['loser_brother'] == 1):
output1+=row['winner_name']
output2+=row['loser_name']
result2.append([row['tourney_name'], row['tourney_date'], output1,1])
result2.append([row['tourney_name'], row['tourney_date'], output2,1])
else:
if (row['winner_brother'] == 1):
output1+=row['winner_name']
if (row['loser_brother'] == 1):
output1+=row['loser_name']
result2.append([row['tourney_name'], row['tourney_date'], output1,0])
resultdf = pd.DataFrame(result2)
resultdf[4] = resultdf[2].shift(1)
resultdf = resultdf[1:]
resultdf = resultdf.iloc[::2]
resultdf = resultdf[[0,1,2,4,3]]
#print matches.to_string(columns=['tourney_name','tourney_date','winner_name', 'loser_name'])
print(resultdf.to_csv(sys.stdout,index=False))
def playernames(group):
"""helper function"""
group['unique_sum'] = len(group['winner_lastname'].unique()) + len(group['loser_lastname'].unique())
temp = [group['winner_lastname'], group['loser_lastname']]
mergelist = pd.concat(temp)
mergelist = mergelist[(mergelist != 'LopezESP') & (mergelist != 'JohanssonSWE') & (mergelist != 'CostaESP') & (mergelist != 'FerreiraRSA') ]
dups = mergelist.duplicated(keep=False)
dups2 = mergelist[mergelist.duplicated(keep=False)]
dups3 = dups2.str[0:-3]
group['winner_lastname'] = group['winner_lastname'].str[0:-3]
group['loser_lastname'] = group['loser_lastname'].str[0:-3]
group['winner_brother'] = np.where(group['winner_lastname'].isin(dups3), 1, 0)
group['loser_brother'] = np.where(group['loser_lastname'].isin(dups3), 1, 0)
return group
def bestNeverQFWin(matches, rankings,activeplayers):
"""finds players who never won a QF (+ streaks)"""
#matches = matches[matches['tourney_date'] > datetime.date(2012,12,29)]
qfmatches = matches[(matches['round']=='QF')]
qfmatches = qfmatches.sort_values(by='tourney_date')
qfgmatches = qfmatches.groupby('winner_name').first().reset_index()
#print(len(matches))
#print(matches[[ 'round', 'tourney_date','tourney_name']].head(1000))
#reused code start (would be better to make a function out of this)
global joinedrankingsdf
#join rankings with playernames
dirname = ".."
playersDB = dirname + "/atp_players.csv"
rankings.columns = ['date', 'rank', 'id','points']
rankings = rankings[rankings['rank'] < 100]
playersdf = pd.read_csv(playersDB,index_col=None,header=None,encoding = "ISO-8859-1")
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
playersdf["fullname"] = playersdf["fname"] + ' ' + playersdf["lname"]
playersdf = playersdf.sort_values(by='fullname')
qfstreaks = pd.DataFrame()
#filter players to increase speed of following iteration
r16matches = matches[(matches['round']=='R16')]
r16players = set(r16matches['winner_name'])
filtered_players = playersdf[playersdf['fullname'].isin(r16players)]
print(len(playersdf))
print(len(filtered_players))
#calc initial losing streaks
for index, player in filtered_players.iterrows():
name = player['fullname']
print(name)
qfmatchesplayer = qfmatches[(qfmatches['winner_name'] == name) | (qfmatches['loser_name'] == name)]
qfmatchesplayer['player'] = name
#print(len(qfmatchesplayer))
if (len(qfmatchesplayer)) > 0:
streak = 1
for index, row in qfmatchesplayer.iterrows():
if (row['winner_name'] == row['player']):
streak=0
qfmatchesplayer.set_value(index,'streak',streak)
elif (row['loser_name'] == row['player']) & (streak==1):
qfmatchesplayer.set_value(index,'streak',streak)
else:
streak=0
qfmatchesplayer.set_value(index,'streak',streak)
qfmatchesplayer=qfmatchesplayer[qfmatchesplayer['streak']==1]
qfstreaks = qfstreaks.append(qfmatchesplayer)
#print(qfmatchesplayer[['player','winner_name', 'loser_name','streak']].to_csv(sys.stdout,index=False))
counts_df = pd.DataFrame(qfstreaks.groupby('player').size().sort_values().rename('counts'))
print(counts_df)
joinedrankingsdf = pd.merge(rankings,playersdf, on='id')
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
#reused code end
res = []
#get all rankings for each player row
for index, match in qfgmatches.iterrows():
name = match['winner_name']
date = match['tourney_date']
print(name)
try:
counts = counts_df[counts_df.index == name]['counts'].values[0]
except:
counts = 0
playerres = joinedrankingsdf[(joinedrankingsdf['fullname'] == name) & (joinedrankingsdf['date'] <= date)]
try:
minrank = playerres.loc[playerres['rank'].idxmin()]
#print(minrank[['fullname','date', 'rank']])
res.append([minrank['fullname'], minrank['rank'], minrank['date'].strftime('%Y.%m.%d'),1,counts])
except:
continue
#add players who still did not win a QF
qfmatches = matches[(matches['round']=='QF')]
qfwinners = set(qfmatches['winner_name'])
qfmatches = qfmatches.groupby('loser_name').filter(lambda g: (len(g[(~g['loser_name'].isin(qfwinners))]) > 0))
counts_df = pd.DataFrame(qfmatches.groupby('loser_name').size().sort_values().rename('counts'))
for index, match in counts_df.iterrows():
name = match.name
counts = match['counts']
playerres = joinedrankingsdf[(joinedrankingsdf['fullname'] == name)]
try:
minrank = playerres.loc[playerres['rank'].idxmin()]
res.append([minrank['fullname'], minrank['rank'], minrank['date'].strftime('%Y.%m.%d'),0,counts])
except:
continue
res = sorted(res, key=itemgetter(1))
pprint.pprint(res)
activeplayerslist = [row[0] for row in activeplayers]
for r in res:
if (r[0] in activeplayerslist):
print("{: >20} {: >20} {: >20} {: >20} {: >20}".format(*r))
def listAllTimeNoQFWins(matches):
"""lists players who never won a QF"""
qfmatches = matches[(matches['round']=='QF')]
qfwinners = set(qfmatches['winner_name'])
qfmatches = qfmatches.groupby('loser_name').filter(lambda g: (len(g[(~g['loser_name'].isin(qfwinners))]) > 0))
counts = qfmatches.groupby('loser_name').size().sort_values()
print(counts)
def setstats(atpmatches):
"""for a player calculates specific set statistics"""
name='Gael Monfils'
matches=atpmatches[(atpmatches['winner_name'] == name) | (atpmatches['loser_name'] == name)]
matches=matches[matches['tourney_date'] > datetime.date(2014,12,28)]
#setfilter
matches = matches[(matches['score'].str.count('-') == 3) | (matches['score'].str.count('-') == 2)]
#norets
matches = matches[~matches['score'].str.contains('RET|W').fillna(False)]
matches['sets_analysis'] = matches.apply(analyzeSets, axis=1)
matches['sets_won'], matches['sets_lost'], matches['first'], matches['res'] = zip(*matches['sets_analysis'].map(lambda x: x.split(',')))
matches['sets_won'] = matches['sets_won'].astype('int')
matches['sets_lost'] = matches['sets_lost'].astype('int')
matches['first'] = matches['first'].astype('int')
print('sets won: ' + str(matches['sets_won'].sum()))
print('sets lost: ' + str(matches['sets_lost'].sum()))
print('first sets won: ' + str(matches['first'].sum()))
print('cb analysis:\n' + str(matches['res'].value_counts(sort=False)))
print('# of matches: ' + str(len(matches)))
#print(matches[['score','sets_won', 'sets_lost', 'first','winner_name', 'loser_name']].to_csv(sys.stdout,index=False))
def analyzeSets(row):
"""helper function"""
sets = row['score'].split(' ')
won=0
lost=0
first=0
res=0
print(sets)
for idx,set in enumerate(sets):
setscore = set.split('-')
if (len(setscore)>1):
#clean tb scores
if( '(' in setscore[0]):
setscore[0]=setscore[0][0]
if( '(' in setscore[1]):
setscore[1]=setscore[1][0]
if(row['winner_name'] == 'Gael Monfils'):
print('player winner')
if((int(setscore[0])>int(setscore[1])) & (int(setscore[0]) > 5)):
won=won+1
if(idx==0):
first=1
elif(int(setscore[0])<int(setscore[1])):
lost=lost+1
else:
print('player loser')
if((int(setscore[0])<int(setscore[1])) & (int(setscore[1]) > 5)):
won=won+1
if(idx==0):
first=1
elif((int(setscore[0])>int(setscore[1]))):
lost=lost+1
print(setscore)
#won the first set and won the match = 0
if ((first==1) & (won>lost)):
res=0
#lost the first set and won the match = 1
if ((first==0) & (won>lost)):
res=1
#won the first set and lost the match = 2
if ((first==1) & (won<lost)):
res=2
#lost the first set and lost the match = 3
if ((first==0) & (won<lost)):
res=3
print(str(won)+','+str(lost)+','+str(first)+','+str(res))
return(str(won)+','+str(lost)+','+str(first)+','+str(res))
def geth2hforplayerswrapper(atpmatches,qmatches):
"""helper function"""
#geth2hforplayer(atpmatches,"Roger Federer")
atpmatches = atpmatches.append(qmatches)
names = atpmatches[atpmatches['winner_rank'] < 100]
names = names.winner_name.unique()
for name in names:
geth2hforplayer(atpmatches,name)
def getwnonh2hs(atpmatches,qmatches,rankings):
"""calculates head to heads"""
#todo: could be extended to older players and also show career-overlap (e.g. were 10y together on tour)s
#make full matches df
atpmatches = atpmatches.append(qmatches)
global joinedrankingsdf
#join rankings with playernames
dirname = ".."
playersDB = dirname + "/atp_players.csv"
rankings.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None,encoding = 'iso-8859-1')
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
joinedrankingsdf = pd.merge(rankings,playersdf, on='id')
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
#get newest top-n rankings
joinedrankingsdf = joinedrankingsdf[(joinedrankingsdf.date == joinedrankingsdf.date.max()) & (joinedrankingsdf['rank'] < 51)]
#for each player in the rankings calculate the h2hs and then compare players in the h2h list with top-n players...
playernames = joinedrankingsdf['fullname']
playernameslist = playernames.tolist()
#dfs for calculating match-number of players
wins = atpmatches.groupby('winner_name').count()
losses = atpmatches.groupby('loser_name').count()
for player in playernames:
h2hlist = geth2hforplayer(atpmatches,player)
h2hnames = [row[0] for row in h2hlist]
noh2hs = [x for x in playernameslist if x not in h2hnames]
player1WL = losses[losses.index == player]['score'].values[0]+wins[wins.index == player]['score'].values[0]
#show rank for player in iteration and show string
for noh2h in noh2hs:
player2WL = losses[losses.index == noh2h]['score'].values[0]+wins[wins.index == noh2h]['score'].values[0]
if (noh2h != player):
print(player + ';' + str(joinedrankingsdf[(joinedrankingsdf['fullname'] == player)]['rank'].values[0]) + ';' + str(player1WL) + ';' + noh2h + ';' + str(joinedrankingsdf[(joinedrankingsdf['fullname'] == noh2h)]['rank'].values[0]) + ';' + str(player2WL))
def getTop100ChallengerPlayersPerWeek(qmatches):
"""finds top 100 challenger players per week"""
matches = qmatches[(qmatches['tourney_level'] == 'C') & (qmatches['round'] == 'R32') & (qmatches['tourney_date'] > datetime.date(2000,1,1))]
matches['top100'] = matches.apply(top100, axis=1)
matches['count'] = matches.groupby(['tourney_date'])['top100'].transform(lambda x: x.sum())
matches['tcount'] = matches.groupby(['tourney_date'])['tourney_name'].transform(lambda x: x.nunique())
matches = matches.sort(['count'], ascending=False)
print(matches[['tourney_date', 'count','tcount']].drop_duplicates().to_csv(sys.stdout,index=False))
#print(matches['top100'].head())
def top100(row):
"""helper function"""
if ((row['winner_rank'] < 101) & (row['loser_rank'] < 101)):
val = 2
elif ((row['loser_rank'] < 101) | (row['winner_rank'] < 101)):
val = 1
else:
val = 0
#print(val)
return val
def showTourneysOfDate(qmatches,year,month,day):
"""for a date shows the tournaments which were played at this date"""
matches = qmatches[(qmatches['tourney_level'] == 'S') & (qmatches['round'] == 'R32') & (qmatches['tourney_date'] == datetime.date(year,month,day))]
matches = matches[(matches['loser_rank'] < 151) | (matches['winner_rank'] < 151)]
print(matches[['tourney_id', 'winner_name', 'winner_rank','loser_name','loser_rank']].drop_duplicates().to_csv(sys.stdout,index=False))
print(matches[['tourney_date', 'tourney_name','tourney_id']].drop_duplicates().to_csv(sys.stdout,index=False))
def titles(matches):
"""calculates titles per player"""
matches = matches[(matches['round'] == 'F')]
matches['titles'] = matches.groupby('winner_name')['winner_name'].transform('count')
matches = matches[(matches['titles'] > 15)]
matches = matches.sort(['titles'], ascending=False)
print(matches[['winner_name', 'titles']].drop_duplicates().to_csv(sys.stdout,index=False))
def lowestRankedTitlists(matches):
"""finds the lowest ranked titlists"""
matches = matches[(matches['tourney_level'] == 'C') & (matches['round'] == 'F') & (matches['winner_rank'] > 600)]
matches = matches.sort(['tourney_date'], ascending=False)
matches['winner_rank'] = matches['winner_rank'].astype('int')
matches['winner_age'] = matches['winner_age'].round(2)
print(matches[['tourney_date', 'tourney_name', 'winner_name', 'winner_rank', 'winner_age']].to_csv(sys.stdout,index=False))
def gamesconcededpertitle(matches):
"""calculates how many games a player lost per title"""
matches=matches[matches['tourney_date'] > datetime.date(2000,1,1)]
matches = matches[(matches['tourney_level'] == 'S')]
matches['wcnt'] = matches.groupby(['tourney_id','winner_name'])['winner_name'].transform('count')
matches = matches[matches['wcnt'] == 5]
matches['sets_analysis'] = matches.apply(analyzeSetsFutures, axis=1)
matches['games_won'], matches['games_lost'], matches['rets'] = zip(*matches['sets_analysis'].map(lambda x: x.split(',')))
#convert columns to int for summing up
matches['games_won'] = matches['games_won'].astype('int')
matches['games_lost'] = matches['games_lost'].astype('int')
matches['rets'] = matches['rets'].astype('int')
#calculate the sum over each matches games
matches['games_won_t'] = matches.groupby(['tourney_id'])['games_won'].transform('sum')
matches['games_lost_t'] = matches.groupby(['tourney_id'])['games_lost'].transform('sum')
matches['rets_t'] = matches.groupby(['tourney_id'])['rets'].transform('sum')
#convert columns to int for summing up
matches['games_won_t'] = matches['games_won_t'].astype('int')
matches['games_lost_t'] = matches['games_lost_t'].astype('int')
matches['rets_t'] = matches['rets_t'].astype('int')
matches = matches.sort(['games_lost_t'], ascending=True)
print(matches[['tourney_id', 'winner_name', 'wcnt','games_won_t','games_lost_t','rets_t']].drop_duplicates().to_csv(sys.stdout,index=False))
def analyzeSetsFutures(row):
"""helper function"""
#6-4 6-7(5) 6-4
#setscore[0] holds the match winner's games, setscore[1] the match loser's games
try:
sets = row['score'].split(' ')
gameswonwinner=0
gameslostwinner=0
retcount=0
if 'RET' in row['score']: retcount=1
if 'W' in row['score']: retcount=1
#print(sets)
for idx,set in enumerate(sets):
setscore = set.split('-')
if (len(setscore)>1):
#clean tb scores
if( '(' in setscore[0]):
setscore[0]=setscore[0][0]
if( '(' in setscore[1]):
setscore[1]=setscore[1][0]
gameswonwinner=gameswonwinner+int(setscore[0])
gameslostwinner=gameslostwinner+int(setscore[1])
#print(str(gameswonwinner)+','+str(gameslostwinner)+','+str(retcount))
return(str(gameswonwinner)+','+str(gameslostwinner)+','+str(retcount))
except:
return(str(0)+','+str(0)+','+str(0))
def lastTimeGrandSlamCountry(atpmatches):
"""grand slam results per country"""
matches=atpmatches[(atpmatches['tourney_level'] == 'G') & ((atpmatches['winner_ioc'] == 'NOR') | (atpmatches['loser_ioc'] == 'NOR'))]
matches = matches.sort(['tourney_date'], ascending=True)
print(matches[['tourney_date','tourney_name', 'round', 'winner_name', 'loser_name']].to_csv(sys.stdout,index=False))
def countunder21grandslam(atpmatches):
"""calculates how many players under 21 were in a grand slam main draw"""
matches=atpmatches[(atpmatches['tourney_level'] == 'G') & (atpmatches['round'] == 'R128')]
matches['w_under_21'] = matches.groupby(['tourney_id'])['winner_age'].transform(lambda x: x[x < 21].count())
matches['l_under_21'] = matches.groupby(['tourney_id'])['loser_age'].transform(lambda x: x[x < 21].count())
matches = matches.reset_index().groupby(['tourney_id']).apply(concat)
matches['players_under_21'] = matches['w_under_21']+matches['l_under_21']
matches['players_under_21'] = matches['players_under_21'].astype('int')
matches['player_names'] = matches['w_under_21_names'] + ',' + matches['l_under_21_names']
#matches = matches.sort(['players_under_21'], ascending=True)
print(matches[['tourney_id','tourney_name', 'player_names','players_under_21']].drop_duplicates().to_csv(sys.stdout,index=False))
#print(matches[['tourney_date','tourney_name','players_under_21']].drop_duplicates().to_csv(sys.stdout,index=False))
def concat(group):
"""helper function"""
group['l_under_21_names'] = "%s" % ', '.join(group['loser_name'][group['loser_age'] < 21])
group['w_under_21_names'] = "%s" % ', '.join(group['winner_name'][group['winner_age'] < 21])
return group
def countryTitle(atpmatches):
"""calculates titles per country"""
matches=atpmatches[(atpmatches['round'] == 'F') & ((atpmatches['winner_ioc'] == 'LUX') | (atpmatches['loser_ioc'] == 'LUX'))]
print(matches[['tourney_date','tourney_name','winner_name','loser_name']].to_csv(sys.stdout,index=False))
def youngGsmatchwinners(atpmatches):
"""calculates young grand slam match winners"""
matches=atpmatches[(atpmatches['tourney_level'] == 'G') & (atpmatches['winner_age'] < 18)]
print(matches[['tourney_date','tourney_name','winner_name', 'winner_age', 'loser_name','loser_age']].to_csv(sys.stdout,index=False))
def mostPlayersInTop100OfCountry(rankings):
"""calculates how many players of a country are in the top1000"""
global joinedrankingsdf
#join rankings with playernames
#note: working with the rankings: make sure iso8859-1 is set to encoding when parsing and that file is without BOM
dirname = ".."
playersDB = dirname + "/atp_players.csv"
rankings.columns = ['date', 'rank', 'id','points']
playersdf = pd.read_csv(playersDB,index_col=None,header=None,encoding = "ISO-8859-1")
playersdf.columns = ['id', 'fname', 'lname','hand','dob','country']
joinedrankingsdf = pd.merge(rankings,playersdf, on='id')
joinedrankingsdf=joinedrankingsdf[(joinedrankingsdf['date'] > datetime.date(2005,1,1)) & (joinedrankingsdf['rank'] < 101)]
joinedrankingsdf["fullname"] = joinedrankingsdf["fname"] + ' ' + joinedrankingsdf["lname"]
joinedrankingsdf['namerank'] = joinedrankingsdf['fullname']+ "," + joinedrankingsdf['rank'].map(str)
#joinedrankingsdf["namerank"] = str(joinedrankingsdf["fullname"]) + ',' + str(joinedrankingsdf["rank"])
joinedrankingsdf['auts'] = joinedrankingsdf.groupby(['date'])['country'].transform(lambda x: x[(x.str.contains('AUT').fillna(False))].count())
joinedrankingsdf=joinedrankingsdf[(joinedrankingsdf['auts'] > 3) & (joinedrankingsdf['country'] == 'AUT')]
joinedrankingsdf = joinedrankingsdf.reset_index().groupby(['date']).apply(concatranknames)
joinedrankingsdf = joinedrankingsdf.sort(['date'], ascending=True)
print(joinedrankingsdf[['date', 'country','autnames']].to_csv(sys.stdout,index=False))
def concatranknames(group):
"""helper function"""
group['autnames'] = "%s" % ', '.join(group['namerank'][group['country'] == 'AUT'])
return group
def topSeedsGS(atpmatches):
"""calculates performance of top seeds at grand slams"""
matches=atpmatches[(atpmatches['tourney_level'] == 'G')]
resmatches = matches.reset_index().groupby(['tourney_id']).apply(calcSeeds)
resmatches = resmatches[resmatches['topseeds'] == 0]
print(resmatches[['tourney_date', 'tourney_name','topseeds']].drop_duplicates().to_csv(sys.stdout,index=False))
res2matches = resmatches[((resmatches['round'] == 'R16') | (resmatches['round'] == 'R32') | (resmatches['round'] == 'R128') | (resmatches['round'] == 'R64')) & (resmatches['loser_seed'] < 3) & (resmatches['topseeds'] == 0)]
print(res2matches[['tourney_date', 'tourney_name','round','winner_name','loser_name','loser_seed','loser_rank','score']].to_csv(sys.stdout,index=False))
def calcSeeds(group):
"""helper function"""
group['topseeds'] = len(group[(group['round'] == 'QF') & ((group['winner_seed'] < 3) | (group['loser_seed'] < 3))])
return group
def top10winstitlist(atpmatches):
"""calculates how many top 10 wins a titlist had in the tournament he won"""
#matches = atpmatches[(atpmatches['tourney_date'] > 20000101) & (atpmatches['tourney_level'] != 'D') & (atpmatches['round'] != 'RR') & (atpmatches['tourney_id'] != '2008-438')]
matches = atpmatches[(atpmatches['tourney_date'] > 19900101) & (atpmatches['tourney_level'] == 'A') & (atpmatches['round'] != 'RR') & (atpmatches['tourney_id'] != '2008-438')]
matches = matches.reset_index().groupby(['tourney_id']).apply(calcTop10WinsForTitlist)
matches = matches[(matches['titlistrank'] > 10) & (matches['titlistname'] == matches['winner_name']) & (matches['titlisttop10wins'] > 2)]
print(matches[['tourney_date', 'tourney_name','tourney_level','titlisttop10wins','round','winner_name','winner_rank','loser_name','loser_rank','score']].to_csv(sys.stdout,index=False))
def calcTop10WinsForTitlist(group):
"""helper function"""
#print(group['tourney_id'])
titlistname = group[(group['round'] == 'F')].iloc[[0]]['winner_name'].values[0]
titlistrank = group[(group['round'] == 'F')].iloc[[0]]['winner_rank'].values[0]
group['titlistname'] = titlistname
group['titlistrank'] = titlistrank
group['titlisttop10wins'] = len(group[(group['winner_name'] == titlistname) & (group['loser_rank'] < 11)])
return group
def findLLwhoWOdinQ(atpmatches,qmatches):
"""find if LL wo'd in FQR"""
resultlist = list()
tourney_group = atpmatches.groupby('tourney_id')
for tname, tdf in tourney_group:
found1=False
found2=False
#first_case finds where a LL won against a Q in a main draw (MD)
first_case = tdf[(tdf['winner_entry'] == 'LL')]
#iterating over first_case matches
for index, match in first_case.iterrows():
first_case_results = qmatches[(qmatches['tourney_name'] == match['tourney_name']+ ' Q') & ((qmatches['round'] =='Q2') | (qmatches['round'] =='Q3')) & (match['winner_name'] == qmatches['loser_name'])]
if (len(first_case_results.index) > 0):
#if results were found, add the qualifying match(es) to the result list
resultlist.append(first_case_results)
#second_case finds where a LL lost against a Q in a main draw (MD)
second_case = tdf[(tdf['loser_entry'] == 'LL')]
for index, match in second_case.iterrows():
second_case_results = qmatches[(qmatches['tourney_name'] == match['tourney_name']+ ' Q') & ((qmatches['round'] =='Q2') | (qmatches['round'] =='Q3')) & (match['loser_name'] == qmatches['loser_name'])]
if (len(second_case_results.index) > 0):
#if results were found, add the qualifying match(es) to the result list
resultlist.append(second_case_results)
result = pd.concat(resultlist).sort(['tourney_date'], ascending=False)
print(result[['tourney_name','tourney_date','round','winner_name','winner_entry', 'loser_name','loser_entry','score']].to_csv(sys.stdout,index=False))
def highestRanked500finalist(atpmatches):
"""finds highest ranked ATP 500 finalists"""
matches = atpmatches[(atpmatches['tourney_date'] > datetime.date(2008,12,20))]
#atp 500
#if draw size = 32, then 8 seeds
#if draw size = 48, then 16 seeds
#if draw size = 56, then 16 seeds
tourney500names = ['Rotterdam', 'Rio de Janeiro', 'Acapulco', 'Dubai', 'Barcelona', 'Hamburg', 'Washington', 'Beijing', 'Tokyo', 'Valencia', 'Basel', 'Memphis']
matches500 = matches[matches['tourney_name'].isin(tourney500names)]
#remove 2014-402 (= memphis) because in 2014 it was a 250
matches500 = matches500[(matches500['tourney_id'] != '2014-402')]
matches500 = matches500[(matches500['round'] == 'F')]
matches500w = matches500[['tourney_date', 'tourney_name','round','winner_name','winner_rank']]
matches500w['result'] = 'W'
matches500w.columns = ['tourney_date', 'tourney_name','round','player_name','player_rank','result']
matches500l = matches500[['tourney_date', 'tourney_name','round','loser_name','loser_rank']]
matches500l['result'] = 'L'
matches500l.columns = ['tourney_date', 'tourney_name','round','player_name','player_rank','result']
final_dfs = [matches500w, matches500l]
final = pd.concat(final_dfs).sort(['player_rank'], ascending=False)
final['player_rank'] = final['player_rank'].astype(int)
print(final[['tourney_date', 'tourney_name','player_name','player_rank','result']].to_csv(sys.stdout,index=False,sep= '-'))
def ageBetweenPlayers(atpmatches,qmatches,fmatches):
"""finds age between players"""
LIMIT = 40
allmatcheslist = []
allmatcheslist.append(atpmatches)
allmatcheslist.append(qmatches)
allmatcheslist.append(fmatches)
allmatches = pd.concat(allmatcheslist)
allmatches['agediff'] = allmatches['winner_age'] - allmatches['loser_age']
allmatches = allmatches[(allmatches['agediff'] < LIMIT*-1) | (allmatches['agediff'] > LIMIT)]
allmatches['agediff'] = allmatches['agediff'].apply(lambda x: x*-1 if x < 0 else x)
allmatches['winner_age'] = allmatches['winner_age'].round(1)
allmatches['loser_age'] = allmatches['loser_age'].round(1)
allmatches['agediff'] = allmatches['agediff'].round(1)
print(allmatches[['tourney_id', 'tourney_name','winner_name', 'winner_age', 'loser_name', 'loser_age' , 'agediff']].to_csv(sys.stdout,index=False))
def percentageOfSeedWinnersinQ(qmatches):
"""finds percentage of seeded winners in Q"""
#i only want atp 250 qualies here, so i need to filter the 4 grand slams
#i dont have to filter 500 and 1000 qualies because later they dont have a Q3
matches = qmatches[(qmatches['tourney_level'] == 'Q') & (qmatches['round'] == 'Q3') & (qmatches['tourney_name'] != 'US Open Q') & (qmatches['tourney_name'] != 'Wimbledon Q') & (qmatches['tourney_name'] != 'Roland Garros Q') & (qmatches['tourney_name'] != 'Australian Open Q')]
matches['seedw'] = matches.groupby('tourney_id')['winner_seed'].transform(lambda x: x[(x > 0)].count())
matches = matches[['tourney_id', 'tourney_name','seedw']].drop_duplicates()
counts = matches['seedw'].value_counts()
dfcnt = pd.DataFrame(counts, columns=['cnt'])
dfcnt['sum'] = dfcnt['cnt'].sum()
dfcnt['percentage'] = (dfcnt['cnt']*100/dfcnt['sum']).round(1)
#print(matches[['tourney_id', 'tourney_name','seedw']].to_csv(sys.stdout,index=False))
print(dfcnt)
def getRankedDict(dict):
"""helper function"""
rank, count, previous, result = 0, 0, None, {}
for key, num in dict:
count += 1
if num != previous:
rank += count
previous = num
count = 0
result[key] = rank
return result
def percentagOfQWinners(qmatches):
"""finds the percentage of Q winners"""
mydict = collections.defaultdict(dict)
#i only want atp 250 qualies here, so i need to filter the 4 grand slams
#i dont have to filter 500 and 1000 qualies because later they dont have a Q3
matches = qmatches[(qmatches['tourney_level'] == 'Q') & (qmatches['round'] == 'Q3') & (qmatches['tourney_name'] != 'US Open Q') & (qmatches['tourney_name'] != 'Wimbledon Q') & (qmatches['tourney_name'] != 'Roland Garros Q') & (qmatches['tourney_name'] != 'Australian Open Q')]
#so matches right now only contains ATP 250 qualies (because of the Q3 filter)
#I now want all these tourney_ids
tourneyids = matches['tourney_id'].unique()
matches_group = qmatches[qmatches['tourney_id'].isin(tourneyids)].groupby('tourney_id')
for tname, tdf in matches_group:
for index, match in tdf.iterrows():
mydict[match.tourney_id][match.winner_name] = 9999.0 if math.isnan(match.winner_rank) else match.winner_rank
mydict[match.tourney_id][match.loser_name] = 9999.0 if math.isnan(match.loser_rank) else match.loser_rank
for key, value in mydict.items():
s_data = sorted(value.items(), key=lambda item: item[1])
result = getRankedDict(s_data)
rankdict[key] = result
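#bucket each player's rank position within the qualifying field: 1 = positions 1-8 (seeded range), 2 = positions 9-16, 3 = outside the top 16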
for key,value in rankdict.items():
for key1, value1 in value.items():
if (value1 < 9):
value[key1] = 1
if ((value1 < 17) & (value1 > 8)):
value[key1] = 2
if (value1 > 16):
value[key1] = 3
matches['group'] = matches.apply(getGroup, axis=1)
#matches = matches[matches['tourney_id'] == '2015-867']
tournamentcnt = len(matches['tourney_id'].unique())
print("250q tournaments: " + str(tournamentcnt))
matches['groupc'] = matches.groupby('tourney_id')['group'].transform(lambda x: x[(x == 3)].count())
#print(matches[['tourney_id', 'groupc', 'groupb','groupu']].to_csv(sys.stdout,index=False))
groupcmatches = matches[matches['groupc'] > 0 ]
groupctournamentcnt = len(groupcmatches['tourney_id'].unique())
print("250q tournaments with at least one groupc q: " + str(groupctournamentcnt))
print("percentage of tournaments with at least one groupc q: " + str(groupctournamentcnt*100/tournamentcnt))
cntdf = pd.DataFrame([['groupa', len(matches[matches['group'] == 1]),len(matches)],['groupb', len(matches[matches['group'] == 2]),len(matches)],['groupc', len(matches[matches['group'] == 3]),len(matches)]])
cntdf.columns = ['groupname', 'quantity','sum']
cntdf['percentage'] = (cntdf['quantity']*100/cntdf['sum']).round(1)
print(cntdf)
############
print('now for full ATP qs:')
#same as above but only take full q draws
fullqmatches = qmatches[(qmatches['tourney_level'] == 'Q') & (qmatches['round'] == 'Q1') & (qmatches['tourney_name'] != 'US Open Q') & (qmatches['tourney_name'] != 'Wimbledon Q') & (qmatches['tourney_name'] != 'Roland Garros Q') & (qmatches['tourney_name'] != 'Australian Open Q')]
fullqmatches['winners'] = fullqmatches.groupby('tourney_id')['winner_name'].transform(lambda x: x[(x.str.contains('').fillna(False))].count())
fullqmatches = fullqmatches[fullqmatches['winners'] == 16]
fullqmatcheslist = fullqmatches['tourney_id'].unique()
fullqmatchesfinals = qmatches[(qmatches['tourney_id'].isin(fullqmatcheslist)) & (qmatches['tourney_level'] == 'Q') & (qmatches['round'] == 'Q3') & (qmatches['tourney_name'] != 'US Open Q') & (qmatches['tourney_name'] != 'Wimbledon Q') & (qmatches['tourney_name'] != 'Roland Garros Q') & (qmatches['tourney_name'] != 'Australian Open Q')]
tournamentcnt = len(fullqmatchesfinals['tourney_id'].unique())
print("250q tournaments with full q: " + str(tournamentcnt))
fullqmatchesfinals['group'] = fullqmatchesfinals.apply(getGroup, axis=1)
fullqmatchesfinals['groupc'] = fullqmatchesfinals.groupby('tourney_id')['group'].transform(lambda x: x[(x == 3)].count())
groupcmatches = fullqmatchesfinals[fullqmatchesfinals['groupc'] > 0 ]
groupctournamentcnt = len(groupcmatches['tourney_id'].unique())
print("250q tournaments with at least one groupc q: " + str(groupctournamentcnt))
print("percentage of tournaments with at least one groupc q: " + str(groupctournamentcnt*100/tournamentcnt))
cntdf = pd.DataFrame([['groupa', len(fullqmatchesfinals[fullqmatchesfinals['group'] == 1]),len(fullqmatchesfinals)],['groupb', len(fullqmatchesfinals[fullqmatchesfinals['group'] == 2]),len(fullqmatchesfinals)],['groupc', len(fullqmatchesfinals[fullqmatchesfinals['group'] == 3]),len(fullqmatchesfinals)]])
cntdf.columns = ['groupname', 'quantity','sum']
cntdf['percentage'] = (cntdf['quantity']*100/cntdf['sum']).round(1)
print(cntdf)
################
def getGroup(row):
"""helper function"""
tid = row['tourney_id']
name = row['winner_name']
group = rankdict[tid][name]
return group
def findSmallestQDraws(qmatches):
"""finds the smallest Q draws"""
matches = qmatches[(qmatches['tourney_level'] == 'Q') & (qmatches['round'] == 'Q3') & (qmatches['tourney_name'] != 'US Open Q') & (qmatches['tourney_name'] != 'Wimbledon Q') & (qmatches['tourney_name'] != 'Roland Garros Q') & (qmatches['tourney_name'] != 'Australian Open Q')]
tourneyids = matches['tourney_id'].unique()
matches = qmatches[(qmatches['tourney_id'].isin(tourneyids))]
matches = matches.reset_index().groupby('tourney_id').apply(myfunc)
matches = matches.sort('player_sums', ascending=True)
print(matches[['tourney_id', 'tourney_name','player_sums']].drop_duplicates().to_csv(sys.stdout,index=False))
def myfunc(group):
"""helper function"""
#get all players into a set
w_set = set(group['winner_name'])
l_set = set(group['loser_name'])
#u_set contains all names of participating players
group['player_sums'] = len(w_set.union(l_set))
return group
def youngestCombinedAge(atpmatches,qmatches,fmatches):
"""finds youngest combined age"""
LIMIT = 40
allmatcheslist = []
allmatcheslist.append(atpmatches)
allmatcheslist.append(qmatches)
allmatcheslist.append(fmatches)
allmatches = pd.concat(allmatcheslist)
allmatches['agecombined'] = allmatches['winner_age'] + allmatches['loser_age']
allmatches = allmatches[(allmatches['agecombined'] < 37)]
allmatches['winner_age'] = allmatches['winner_age'].round(1)
allmatches['loser_age'] = allmatches['loser_age'].round(1)
allmatches['agecombined'] = allmatches['agecombined'].round(1)
allmatches = allmatches.sort('agecombined', ascending=True)
print(allmatches[['tourney_id', 'tourney_name','winner_name', 'winner_age', 'loser_name', 'loser_age' , 'agecombined']].to_csv(sys.stdout,index=False))
#this needs to be global for percentagOfQWinners() to work
rankdict = collections.defaultdict(dict)
joinedrankingsdf = pd.DataFrame()
#reading ATP level matches. The argument defines the path to the match files.
#since the match files are in the parent directory we provide ".." as an argument
#atpmatches = readATPMatches("..")
atpmatches = readATPMatchesParseTime("..")
#reading Challenger + ATP Q matches
#qmatches = readChall_QATPMatches("..")
#qmatches = readChall_QATPMatchesParseTime("..")
#fmatches = readFMatches("..")
#fmatches = readFMatchesParseTime("..")
#rankings = readAllRankings("..")
#the following lines make use of methods defined above this file. just remove the hash to uncomment the line and use the method.
#matchesPerCountryAndRound(matches)
#findLLQmultipleMatchesAtSameTournament(atpmatches,qmatches)
#bestLLinGrandSlams(atpmatches)
#numberOfSetsLongerThan(atpmatches,2,130)
#geth2hforplayerswrapper(atpmatches,qmatches)
#getwnonh2hs(atpmatches,qmatches,rankings)
#getTop100ChallengerPlayersPerWeek(qmatches)
#getTop100ChallengerPlayersPerWeek(fmatches)
#showTourneysOfDate(fmatches,2011,10,3)
#geth2hforplayer(atpmatches,"Roger Federer")
#getStreaks(fmatches)
#activeplayers = getActivePlayers("..")
#getWinLossByPlayer(fmatches,activeplayers,False)
#seedRanking(atpmatches)
#qualifierSeeded(fmatches)
#rankofQhigherthanlastSeed(atpmatches)
#highRankedQLosers(qmatches,atpmatches)
#avglastseedrank(atpmatches)
#getBestQGrandSlamPlayer(qmatches,rankings)
#getShortestFiveSetter(atpmatches)
#getworstlda(atpmatches)
#getCountriesPerTournament(qmatches)
#getRetsPerPlayer(atpmatches,qmatches,fmatches,activeplayers,False)
#youngestChallengerWinners(qmatches)
#bestNonChampion(players,ranks)
#fedR4WimbiTime(atpmatches)
#youngFutures(fmatches)
#rankingPointsOfYoungsters(players,ranks)
#highestRankedAustriansInR16(atpmatches)
#mostRetsInTourneyPerPlayer(atpmatches)
#mostRetsPerYear(atpmatches)
#mostWCs(atpmatches)
#oldestWinnerATP(atpmatches,qmatches)
#getAces(qmatches)
#getRets(fmatches)
#get1seedWinners(atpmatches)
#getseedWinners(atpmatches)
#getZeroBreakPointChampions(atpmatches)
#easiestOpponents(atpmatches)
#wcwinner(atpmatches)
#titlesataage(atpmatches)
#consecutivlosseswithoutbreaks(atpmatches)
#losetonadalafterwin(atpmatches)
#fouroffiveseedsgone(atpmatches)
#backtobacklosses(atpmatches,'Rafael Nadal')
#titlesdefended(atpmatches)
#titlessurface(atpmatches)
#matchesPerLastNameAndRound(atpmatches)
#bestNeverQFWin(atpmatches,rankings,activeplayers)
#listAllTimeNoQFWins(atpmatches)
#setstats(atpmatches)
#titles(fmatches)
#lowestRankedTitlists(qmatches)
#gamesconcededpertitle(fmatches)
#lastTimeGrandSlamCountry(atpmatches)
#countunder21grandslam(atpmatches)
#countryTitle(fmatches)
#youngGsmatchwinners(atpmatches)
#mostPlayersInTop100OfCountry(rankings)
#topSeedsGS(atpmatches)
#top10winstitlist(atpmatches)
#findLLwhoWOdinQ(atpmatches,qmatches)
#ageBetweenPlayers(atpmatches,qmatches,fmatches)
#percentageOfSeedWinnersinQ(qmatches)
#percentagOfQWinners(qmatches)
#findSmallestQDraws(qmatches)
#youngestCombinedAge(atpmatches,fmatches,qmatches)
highestRanked500finalist(atpmatches)
|
gpl-3.0
|
datascienceguide/datascienceguide.github.io
|
tutorials/document_clustering.py
|
230
|
8356
|
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a Python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed-dimensional space,
possibly with collisions. The word count vectors are then normalized so that
each has an l2-norm equal to one (projected onto the Euclidean unit ball), which
seems to be important for k-means to work in high-dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
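  As a minimal illustrative sketch (not this script's actual option handling,
  and assuming a scikit-learn release whose HashingVectorizer offers a
  non-negative output switch, e.g. non_negative=True in older versions or
  alternate_sign=False in newer ones; list_of_documents is a placeholder for
  any list of raw text documents), such a pipeline could look like:
    hasher = HashingVectorizer(n_features=10000, stop_words='english',
                               non_negative=True, norm=None)
    vectorizer = make_pipeline(hasher, TfidfTransformer())
    X = vectorizer.fit_transform(list_of_documents)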
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
|
mit
|
shakamunyi/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/test_saver.py
|
3
|
3455
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
class SaverTest(tf.test.TestCase):
def testIris(self):
path = tf.test.get_temp_dir() + '/tmp.saver'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testCustomModel(self):
path = tf.test.get_temp_dir() + '/tmp.saver2'
random.seed(42)
iris = datasets.load_iris()
def custom_model(X, y):
return learn.models.logistic_regression(X, y)
classifier = learn.TensorFlowEstimator(model_fn=custom_model, n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testDNN(self):
path = tf.test.get_temp_dir() + '/tmp_saver3'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
# new_classifier = learn.TensorFlowEstimator.restore(path)
# self.assertEqual(type(new_classifier), type(classifier))
# score = accuracy_score(iris.target, new_classifier.predict(iris.data))
# self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testNoFolder(self):
with self.assertRaises(ValueError):
learn.TensorFlowEstimator.restore('no_model_path')
def testNoCheckpoints(self):
path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
os.remove(os.path.join(path, 'checkpoint'))
with self.assertRaises(NotImplementedError):
learn.TensorFlowEstimator.restore(path)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
shnizzedy/SM_openSMILE
|
openSMILE_preprocessing/noise_replacement/openSMILE_output_summary.py
|
1
|
1836
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
openSMILE_output_summary.py
Functions to combine and plot emobase and ComParE_2016 summary outputs.
Author:
– Jon Clucas, 2017 ([email protected])
© 2017, Child Mind Institute, Apache v2.0 License
"""
from urllib.request import urlretrieve
import nr_box_plots as nbp
import os
import pandas as pd
def get_df(config_summary_url):
"""
Function to get a summary table from a web-accessible URL.
Parameters
----------
config_summary_url : string
url of table to import
Returns
-------
config_summary_df : pandas dataframe
table from url
"""
return pd.read_csv(urlretrieve(config_summary_url)[0])
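# Usage sketch of get_df, reusing the two OSF URLs that main() below passes in:
#     compare_df = get_df('https://osf.io/pgevr/?action=download&version=1')
#     emobase_df = get_df('https://osf.io/rdn82/?action=download&version=1')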
def main():
compare_url = 'https://osf.io/pgevr/?action=download&version=1'
emobase_url = 'https://osf.io/rdn82/?action=download&version=1'
dfs = []
for url in [compare_url, emobase_url]:
dfs.append(get_df(url))
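# Combine both config summaries: concatenate them, group by 'method', keep the
# summed 'sum(...)' columns and the averaged 'mean(...)' columns, then join the
# two aggregates side by side on the shared method index.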
summary = pd.merge(pd.concat((dfs[0], dfs[1])).groupby('method').sum(
).filter(regex='sum*'), pd.concat((dfs[0], dfs[1])).groupby(
'method').mean().filter(regex='mean*'), left_index=True,
right_index=True)
summary['method'] = summary.index
summary = summary[['method', 'sum(MAD)', 'mean(MAD)', 'rank(sum(MAD))',
'rank(mean(MAD))']]
summary.sort_values('rank(mean(MAD))', inplace=True)
out = os.path.join('.', 'replacement_test_outputs',
'adults_replaced_summary')
if not os.path.exists(out):
os.makedirs(out)
summary.to_csv(os.path.join(out, 'mad_rank_summary.csv'), index=False)
for y in ["sum(MAD)", "mean(MAD)"]:
nbp.plot(summary, out, y)
# ============================================================================
if __name__ == '__main__':
main()
|
apache-2.0
|
ggstuart/compost
|
tests.py
|
1
|
9844
|
import unittest
from datetime import timedelta, datetime, timezone
from random import randint, random
from pandas import DataFrame, date_range#, Index
from numpy import nan
from compost import Dataset, ShortDatasetError, SubMinuteTimestepError
from compost import SavingCalculation, DailyAverageModel
class TestDatasetCreation(unittest.TestCase):
"""tests for error states in constructor"""
def setUp(self):
index = date_range('1/1/2015', periods=365)
self.df = DataFrame(list(range(len(index))), index=index, columns=['value'])
def test_sub_minute(self):
self.assertRaises(SubMinuteTimestepError, Dataset, self.df, 30, cumulative=False)
def test_sub_minute_edge(self):
self.assertRaises(SubMinuteTimestepError, Dataset, self.df, 59, cumulative=False)
def test_sub_minute_negative(self):
self.assertRaises(SubMinuteTimestepError, Dataset, self.df, -4, cumulative=False)
def test_sub_minute_one_and_a_bit(self):
self.assertRaises(SubMinuteTimestepError, Dataset, self.df, 64, cumulative=False)
def test_minute_ok(self):
try:
Dataset(self.df, 60, cumulative=False)
except SubMinuteTimestepError:
self.fail("Dataset(df, 60) raised SubMinuteTimestepError!")
class TestPerfectData(unittest.TestCase):
"""what happens with nice data"""
def setUp(self):
index = date_range('1/1/2015', periods=365)
self.df = DataFrame(list(range(len(index))), index=index, columns=['value'])
self.dataset = Dataset(self.df, 60*60*24, cumulative=False)
def test_validates(self):
self.assertTrue(self.dataset.validate())
def test_partial_validates(self):
"""cut the data up and it still works"""
d = Dataset(self.df.head(100), 60*60*24, cumulative=False)
self.assertTrue(d.validate())
def test_short_raises(self):
"""single value datasets raise an error"""
d = Dataset(self.df.head(1), 60*60*24, cumulative=False)
self.assertRaises(ShortDatasetError, d.validate)
def test_interpolate_skipped(self):
d2 = self.dataset.interpolate()
self.assertEqual(self.dataset, d2)
class InterpolatedDataTests(object):
"""common tests for data that needs work"""
def test_validation_fails(self):
self.assertFalse(self.dataset.validate())
def test_interpolate_validates(self):
d1 = self.dataset.interpolate()
self.assertTrue(d1.validate())
def test_interpolate_maintains_total(self):
d1 = self.dataset.interpolate()
self.assertEqual(self.dataset.total(), d1.total())
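# Note: InterpolatedDataTests is a mixin, not a TestCase itself; each concrete
# Test*Data class below pairs it with unittest.TestCase and only supplies a
# setUp() that builds its dataset, so the three checks above run for every variant.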
class TestLowResData(InterpolatedDataTests, unittest.TestCase):
"""what happens with e.g. weekly data"""
def setUp(self):
index = date_range('1/1/2015', periods=5, freq="7D")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])
self.dataset = Dataset(df, 60*60*24, cumulative=False)
super(TestLowResData, self).setUp()
class TestCumulativeLowResData(InterpolatedDataTests, unittest.TestCase):
"""what happens with e.g. weekly cumulative data"""
def setUp(self):
index = date_range('1/1/2015', periods=5, freq="7D")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])
self.dataset = Dataset(df, 60*60*24, cumulative=True)
super(TestCumulativeLowResData, self).setUp()
class TestMissingLowResData(InterpolatedDataTests, unittest.TestCase):
"""what happens when weekly data has missing values?"""
def setUp(self):
index = date_range('1/1/2015', periods=52, freq="7D")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])[index.month != 5]
self.dataset = Dataset(df, 60*60*24, cumulative=False)
super(TestMissingLowResData, self).setUp()
class TestMissingCumulativeLowResData(InterpolatedDataTests, unittest.TestCase):
"""what happens when cumulative weekly data has missing values?"""
def setUp(self):
index = date_range('1/1/2015', periods=52, freq="7D")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])[index.month != 5]
self.dataset = Dataset(df, 60*60*24, cumulative=True)
super(TestMissingCumulativeLowResData, self).setUp()
class TestHighResData(InterpolatedDataTests, unittest.TestCase):
"""what happens with e.g. 15-minutely data"""
def setUp(self):
index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])
self.dataset = Dataset(df, 60*60*24, cumulative=False)
super(TestHighResData, self).setUp()
class TestCumulativeHighResData(InterpolatedDataTests, unittest.TestCase):
"""what happens with e.g. 15-minutely cumulative data"""
def setUp(self):
index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])
self.dataset = Dataset(df, 60*60*24, cumulative=True)
super(TestCumulativeHighResData, self).setUp()
class TestMissingHighResData(InterpolatedDataTests, unittest.TestCase):
"""what happens when 15-minutely data has missing values?"""
def setUp(self):
index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])[index.day != 5]
self.dataset = Dataset(df, 60*60*24, cumulative=False)
super(TestMissingHighResData, self).setUp()
class TestMissingCumulativeHighResData(InterpolatedDataTests, unittest.TestCase):
"""what happens when cumulative 15-minutely data has missing values?"""
def setUp(self):
index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
df = DataFrame(list(range(len(index))), index=index, columns=['value'])[index.day != 5]
self.dataset = Dataset(df, 60*60*24, cumulative=True)
super(TestMissingCumulativeHighResData, self).setUp()
# class TestHighResWithMissingData(unittest.TestCase):
# """what happens when 15-minutely data has missing values?"""
#
# def setUp(self):
# index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
# self.df = DataFrame(list(range(len(index))), index=index, columns=['value'])[index.day >= 3]
# self.dataset1 = Dataset(self.df, 60*60*24)
# self.dataset2 = Dataset(self.df, 60*60*24, cumulative=True)
# def test_validation_fails(self):
# self.assertFalse(self.dataset1.validate())
#
# def test_interpolate_validates(self):
# d1 = self.dataset1.interpolate()
# d2 = self.dataset2.interpolate()
# self.assertTrue(d1.validate())
# self.assertTrue(d2.validate())
#
# def test_interpolate_maintains_total(self):
# # print(self.dataset1.measurements.head(5))
# d1 = self.dataset1.interpolate()
# # print(d1.measurements.head(5))
# d2 = self.dataset2.interpolate()
# self.assertEqual(self.dataset1.measurements.value.sum(), d1.measurements.value.sum())
# self.assertEqual(self.dataset2.measurements.diff().value.sum(), d2.measurements.diff().value.sum())
# class TestDatasetValidation(unittest.TestCase):
#
# def setUp(self):
# index = date_range('1/1/2015', periods=365)
# self.df = DataFrame(list(range(365)), index=index, columns=['value'])
#
# def test_missing(self):
# d = Dataset(self.df[self.df.index.day != 1], 60*60*24)
# self.assertFalse(d.validate())
#
# def test_bad_resolution(self):
# d = Dataset(self.df[self.df.index.day != 1], 60*60*12)
# self.assertFalse(d.validate())
#
#
# class TestDatasetInterpolation(unittest.TestCase):
#
# def setUp(self):
# index = date_range('1/1/2015', periods=366)
# self.df = DataFrame(list(range(366)), index=index, columns=['value'])
#
#
# def test_low_resolution(self):
# d1 = Dataset(self.df, 60*60*12)
# d2 = d1.interpolate()
# self.assertTrue(d2.validate())
#
# def test_high_resolution(self):
# d1 = Dataset(self.df, 60*60*48)
# d2 = d1.interpolate()
# self.assertTrue(d2.validate())
#
# def test_high_resolution_cumulative(self):
# d1 = Dataset(self.df, 60*60*48, cumulative=True)
# d2 = d1.interpolate()
# self.assertTrue(d2.validate())
#
# def test_missing_data(self):
# df = self.df[self.df.index.day != 5] #cut out some data
# d1 = Dataset(df, 60*60*48)
# d2 = d1.interpolate()
# self.assertTrue(d2.validate())
#
# def test_randomised_index(self):
# index = Index([i + timedelta(seconds=randint(-100,100)) for i in self.df.index])
# self.df.index = index
# d1 = Dataset(self.df, 60*60*48)
# d2 = d1.normalise()
# self.assertTrue(d2.validate())
#
class TestSavingCalculation(unittest.TestCase):
def setUp(self):
index = date_range('1/1/2015', periods=4*24*365, freq="15Min")
self.df = DataFrame([random() for i in range(len(index))], index=index, columns=['value'])[index.day != 5]
def test_something(self):
class DateRange(object):
def __init__(self, start, end):
self.start_date = start.replace(tzinfo=timezone.utc)
self.end_date = end.replace(tzinfo=timezone.utc)
baseline = DateRange(datetime(2015,1,1), datetime(2015,4,30))
competition = DateRange(datetime(2015,5,1), datetime(2015,8,31))
sc = SavingCalculation(self.df, DailyAverageModel, competition, baseline, cumulative=False)
savings = sc.savings()
if __name__ == "__main__":
from pandas import __version__
print(f"pandas v{__version__}")
unittest.main()
|
gpl-3.0
|
gon1213/SDC
|
behavioral_cloning/CarND-Transfer-Learning-lab/run_bottleneck.py
|
6
|
3807
|
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.layers import Input, AveragePooling2D
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.datasets import cifar10
import pickle
import tensorflow as tf
import keras.backend as K
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'cifar10', "The dataset to make bottleneck features for, one of 'cifar10' or 'traffic'")
flags.DEFINE_string('network', 'resnet', "The model to bottleneck, one of 'vgg', 'inception', or 'resnet'")
flags.DEFINE_integer('batch_size', 16, 'The batch size for the generator')
batch_size = FLAGS.batch_size
h, w, ch = 224, 224, 3
if FLAGS.network == 'inception':
h, w, ch = 299, 299, 3
from keras.applications.inception_v3 import preprocess_input
img_placeholder = tf.placeholder("uint8", (None, 32, 32, 3))
resize_op = tf.image.resize_images(img_placeholder, (h, w), method=0)
def gen(session, data, labels, batch_size):
def _f():
start = 0
end = start + batch_size
n = data.shape[0]
while True:
X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
X_batch = preprocess_input(X_batch)
y_batch = labels[start:end]
start += batch_size
end += batch_size
if start >= n:
start = 0
end = batch_size
print(start, end)
yield (X_batch, y_batch)
return _f
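# gen() returns a zero-argument factory; the generator it produces resizes each
# 32x32 batch to (h, w) via `resize_op`, applies the network-specific
# preprocess_input, and cycles over the data indefinitely. Minimal usage sketch
# (same names as in main() below):
#     train_gen = gen(sess, X_train, y_train, batch_size)
#     features = model.predict_generator(train_gen(), X_train.shape[0])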
def create_model():
input_tensor = Input(shape=(h, w, ch))
if FLAGS.network == 'vgg':
model = VGG16(input_tensor=input_tensor, include_top=False)
x = model.output
x = AveragePooling2D((7, 7))(x)
model = Model(model.input, x)
elif FLAGS.network == 'inception':
model = InceptionV3(input_tensor=input_tensor, include_top=False)
x = model.output
x = AveragePooling2D((8, 8), strides=(8, 8))(x)
model = Model(model.input, x)
else:
model = ResNet50(input_tensor=input_tensor, include_top=False)
return model
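# The pooling sizes above collapse each network's final feature map to 1x1:
# VGG16 without its top emits 7x7 maps at 224x224 input and InceptionV3 emits
# 8x8 maps at 299x299, while ResNet50 (in the Keras releases this script
# targets) already ends in a pooling layer, so it is returned unchanged.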
def main(_):
if FLAGS.dataset == 'cifar10':
(X_train, y_train), (_, _) = cifar10.load_data()
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
else:
with open('data/train.p', mode='rb') as f:
train = pickle.load(f)
X_train, X_val, y_train, y_val = train_test_split(train['features'], train['labels'], test_size=0.33, random_state=0)
train_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_train')
validation_output_file = "{}_{}_{}.p".format(FLAGS.network, FLAGS.dataset, 'bottleneck_features_validation')
print("Resizing to", (w, h, ch))
print("Saving to ...")
print(train_output_file)
print(validation_output_file)
with tf.Session() as sess:
K.set_session(sess)
K.set_learning_phase(1)
model = create_model()
print('Bottleneck training')
train_gen = gen(sess, X_train, y_train, batch_size)
bottleneck_features_train = model.predict_generator(train_gen(), X_train.shape[0])
data = {'features': bottleneck_features_train, 'labels': y_train}
pickle.dump(data, open(train_output_file, 'wb'))
print('Bottleneck validation')
val_gen = gen(sess, X_val, y_val, batch_size)
bottleneck_features_validation = model.predict_generator(val_gen(), X_val.shape[0])
data = {'features': bottleneck_features_validation, 'labels': y_val}
pickle.dump(data, open(validation_output_file, 'wb'))
if __name__ == '__main__':
tf.app.run()
|
gpl-3.0
|
ibmsoe/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io_test.py
|
89
|
8340
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
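# Helper used by several tests below: it builds the input_fn's output tensors,
# starts the queue runners under a Coordinator, evaluates a single batch, and
# then stops and joins the runners.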
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
|
apache-2.0
|
pratapvardhan/scikit-learn
|
sklearn/neighbors/tests/test_approximate.py
|
55
|
19053
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when `radius` is set
# to the mean distance from the query point to the other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
bsd-3-clause
|
tedmeeds/tcga_encoder
|
tcga_encoder/analyses/old/input_cluster_from_z_space.py
|
1
|
19405
|
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from scipy import stats
from sklearn.metrics import roc_auc_score  # used by auc_test below; the wildcard imports above may already provide it
def auc_standard_error( theta, nA, nN ):
# from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# theta: estimated AUC, can be 0.5 for a random test
# nA size of population A
# nN size of population N
Q1=theta/(2.0-theta); Q2=2*theta*theta/(1+theta)
SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
return SE
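# For reference, the Hanley & McNeil (1982) standard error in readable form:
#     Q1 = theta / (2 - theta),  Q2 = 2*theta**2 / (1 + theta)
#     SE = sqrt((theta*(1-theta) + (nA-1)*(Q1-theta**2) + (nN-1)*(Q2-theta**2)) / (nA*nN))
# e.g. a chance-level test (theta = 0.5) with nA = nN = 50 gives SE of about 0.058.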
def auc_test( true_y, est_y ):
n = len(true_y)
n_1 = true_y.sum()
n_0 = n - n_1
if n_1 == 0 or n_1 == n:
return 0.5, 0.0, 0.0, 1.0
auc = roc_auc_score( true_y, est_y )
difference = auc - 0.5
if difference < 0:
# switch labels
se = auc_standard_error( auc, n_0, n_1 )
se_null = auc_standard_error( 0.5, n_0, n_1 )
else:
se = auc_standard_error( 1-auc, n_1, n_0 )
se_null = auc_standard_error( 0.5, n_1, n_0 )
se_combined = np.sqrt( se**2 + se_null**2 )
z_value = np.abs(difference) / se_combined
p_value = 1.0 - stats.norm.cdf( np.abs(z_value) )
return auc, se, z_value, p_value
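# Minimal usage sketch of auc_test on a synthetic toy example (labels/scores made up):
#     true_y = np.array([0, 0, 1, 1]); est_y = np.array([0.1, 0.4, 0.35, 0.8])
#     auc, se, z, p = auc_test(true_y, est_y)   # auc == 0.75 here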
def find_keepers_over_groups( z, groups, name, nbr2keep, stats2use ):
inners = []; p_inners=[]
mx_inner = 0.0
norm_z = np.linalg.norm(z)
for X, stat in zip( groups, stats2use ):
pearsons = np.zeros( X.shape[1] )
pvalues = np.zeros( X.shape[1] )
for x,x_idx in zip( X.values.T, range(X.shape[1])):
if stat == "pearson":
pearsons[x_idx], pvalues[x_idx] = stats.pearsonr( z, x )
elif stat == "spearman":
pearsons[x_idx], pvalues[x_idx] = stats.spearmanr( z, x )
elif stat == "auc":
true_y = (x>0).astype(int)
auc, se, zvalue, pvalue = auc_test( true_y, z ) #np.sqrt( ses_tissue**2 + se_r_tissue**2 )
pearsons[x_idx] = auc-0.5
pvalues[x_idx] = pvalue
#pdb.set_trace()
#norms = norm_z*np.linalg.norm( X, axis=0 )
#inner = pd.Series( np.dot( z, X )/norms, index = X.columns, name=name )
inner = pd.Series( pearsons, index = X.columns, name=name )
p_inner = pd.Series( pvalues, index = X.columns, name=name )
inners.append(inner)
p_inners.append(p_inner)
this_mx = np.max(np.abs(inner))
if this_mx > mx_inner:
mx_inner = this_mx
all_keepers = []
#all_pvalues = []
for inner,p_inner in zip(inners,p_inners):
#inner.sort_values(inplace=True)
#inner = inner / mx_inner
#abs_inner = np.abs( inner )
#ordered = np.argsort( -inner.values )
ordered = np.argsort( p_inner.values )
ordered = pd.DataFrame( np.vstack( (inner.values[ordered],p_inner.values[ordered] ) ).T, index =inner.index[ordered],columns=["r","p"] )
#pdb.set_trace()
#keepers = pd.concat( [ordered[:nbr2keep], ordered[-nbr2keep:]], axis=0 )
keepers = ordered[:nbr2keep]
#pdb.set_trace()
#keepers = keepers.sort_values()
all_keepers.append(keepers)
return all_keepers
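# find_keepers_over_groups: for each data group (RNA, miRNA, METH, DNA below) it
# scores every column against the latent coordinate z with the requested statistic
# (Pearson, Spearman, or AUC - 0.5, plus its p-value) and returns, per group, the
# nbr2keep columns with the smallest p-values as a DataFrame with columns ["r", "p"].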
def find_keepers(z, X, name, nbr2keep):
inner = pd.Series( np.dot( z, X ), index = X.columns, name=name )
inner.sort_values(inplace=True)
inner = inner / np.max(np.abs(inner))
#signed = np.sign( inner )
abs_inner = np.abs( inner )
ordered = np.argsort( -abs_inner.values )
ordered = pd.Series( inner.values[ordered], index =inner.index[ordered],name=name )
keepers = ordered[:nbr2keep]
keepers = keepers.sort_values()
return keepers
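# find_keepers is the single-group counterpart: it ranks the columns of X by the
# magnitude of their inner product with z (normalised by the largest magnitude)
# and returns the nbr2keep strongest, sorted by signed value.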
def main( data_location, results_location ):
pathway_info = Pathways()
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
save_dir = os.path.join( results_path, "input_clustering2" )
check_and_mkdir(save_dir)
z_dir = os.path.join( save_dir, "z_pics" )
check_and_mkdir(z_dir)
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
Z = np.vstack( (Z_train.values, Z_val.values) )
n_z = Z.shape[1]
#pdb.set_trace()
z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
barcodes = data_store["/CLINICAL/observed"][ data_store["/CLINICAL/observed"][["RNA","miRNA","METH","DNA"]].sum(1)==4 ].index.values
Z=Z.loc[barcodes]
Z_values = Z.values
tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
rna = np.log(1+data_store["/RNA/RSEM"].loc[ barcodes ])
mirna = np.log(1+data_store["/miRNA/RSEM"].loc[ barcodes ])
meth = np.log(0.1+data_store["/METH/METH"].loc[ barcodes ])
dna = data_store["/DNA/channel/0"].loc[ barcodes ]
tissue_names = tissues.columns
tissue_idx = np.argmax( tissues.values, 1 )
n = len(Z)
n_tissues = len(tissue_names)
rna_normed = rna; mirna_normed = mirna; meth_normed = meth; dna_normed=2*dna-1
for t_idx in range(n_tissues):
t_query = tissue_idx == t_idx
X = rna[t_query]
X -= X.mean(0)
X /= X.std(0)
rna_normed[t_query] = X
X = mirna[t_query]
X -= X.mean(0)
X /= X.std(0)
mirna_normed[t_query] = X
X = meth[t_query]
X -= X.mean(0)
X /= X.std(0)
meth_normed[t_query] = X
nbr = 15
Z_keep_rna=[]
Z_keep_mirna=[]
Z_keep_meth=[]
Z_keep_dna = []
for z_idx in range(n_z):
z_values = Z_values[:,z_idx]
order_z = np.argsort(z_values)
rna_sorted = pd.DataFrame( rna_normed.values[order_z,:], index = barcodes[order_z], columns = rna.columns )
mirna_sorted = pd.DataFrame( mirna_normed.values[order_z,:], index = barcodes[order_z], columns = mirna.columns )
meth_sorted = pd.DataFrame( meth_normed.values[order_z,:], index = barcodes[order_z], columns = meth.columns )
dna_sorted = pd.DataFrame( dna_normed.values[order_z,:], index = barcodes[order_z], columns = dna.columns )
#
# inner_rna = pd.Series( np.dot( z_values, rna_normed ), index = rna_normed.columns, name="rna" )
# inner_rna.sort_values(inplace=True)
# inner_rna = inner_rna / np.max(np.abs(inner_rna))
# sign_rna = np.sign( inner_rna )
# abs_rna = np.abs( inner_rna )
# ordered_rna = np.argsort( -abs_rna.values )
# ordered_rna = pd.Series( inner_rna.values[ordered_rna], index =inner_rna.index[ordered_rna],name="rna" )
#
# keep_rna = ordered_rna[:nbr]
# keep_rna = keep_rna.sort_values()
keep_rna,keep_mirna,keep_meth,keep_dna = find_keepers_over_groups( z_values, [rna_normed,mirna_normed,meth_normed,dna_normed], "z_%d"%(z_idx), nbr, stats2use=["spearman","spearman","spearman","auc"])
# keep_rna = find_keepers( z_values, rna_normed, "z_%d"%(z_idx), nbr )
# keep_mirna = find_keepers( z_values, mirna_normed, "z_%d"%(z_idx), nbr )
# keep_meth = find_keepers( z_values, meth_normed, "z_%d"%(z_idx), nbr )
keep_rna_big,keep_mirna_big,keep_meth_big,keep_dna_big = find_keepers_over_groups( z_values, [rna_normed,mirna_normed,meth_normed,dna_normed], "z_%d"%(z_idx), 2*nbr, stats2use=["spearman","spearman","spearman","auc"])
# keep_rna_big = find_keepers( z_values, rna_normed, "z_%d"%(z_idx), 3*nbr )
# keep_mirna_big = find_keepers( z_values, mirna_normed, "z_%d"%(z_idx), 3*nbr )
# keep_meth_big = find_keepers( z_values, meth_normed, "z_%d"%(z_idx), 3*nbr )
Z_keep_rna.append( keep_rna )
Z_keep_mirna.append( keep_mirna )
Z_keep_meth.append( keep_meth )
Z_keep_dna.append( keep_dna )
f = pp.figure( figsize = (12,8))
ax1 = f.add_subplot(421);ax2 = f.add_subplot(423);ax3 = f.add_subplot(425);ax4 = f.add_subplot(427)
#ax_pie1 = f.add_subplot(422); ax_pie3 = f.add_subplot(424); ax_pie4 = f.add_subplot(426)
ax_pie1 = f.add_subplot(222); #ax_pie3 = f.add_subplot(424); ax_pie4 = f.add_subplot(426)
h1=keep_rna[["r"]].plot(kind='barh',ax=ax1,color="red",legend=False,title=None,fontsize=8); h1.set_xlim(-0.5,0.5); ax1.set_title(""); h1.set_xticklabels([]); ax1.legend(["RNA"])
h2=keep_mirna[["r"]].plot(kind='barh',ax=ax4,color="black",legend=False,title=None,fontsize=8);h2.set_xlim(-0.5,0.5);ax4.set_title(""); ax4.legend(["miRNA"])
h3=keep_meth[["r"]].plot(kind='barh',ax=ax3,color="blue",legend=False,title=None,fontsize=8);h3.set_xlim(-0.5,0.5);ax3.set_title(""); h3.set_xticklabels([]); ax3.legend(["METH"])
h4=keep_dna[["r"]].plot(kind='barh',ax=ax2,color="green",legend=False,title=None,fontsize=8);h4.set_xlim(-0.5,0.5);ax2.set_title(""); h4.set_xticklabels([]); ax2.legend(["DNA"])
neg_dna = pp.find( keep_dna_big.values[:,0]<0) ; pos_dna = pp.find( keep_dna_big.values[:,0]>0)
neg_rna = pp.find( keep_rna_big.values[:,0]<0) ; pos_rna = pp.find( keep_rna_big.values[:,0]>0)
neg_meth = pp.find( keep_meth_big.values[:,0]<0) ; pos_meth = pp.find( keep_meth_big.values[:,0]>0)
#dna_kegg,dna_readable = pathway_info.CancerEnrichment(keep_dna_big.index, np.abs( np.log2(keep_dna_big.values[:,1]) ) )
#rna_kegg,rna_readable = pathway_info.CancerEnrichment(keep_rna_big.index, np.abs( np.log2(keep_rna_big.values[:,1]) ) )
#meth_kegg,meth_readable = pathway_info.CancerEnrichment(keep_meth_big.index, np.abs( np.log2(keep_meth_big.values[:,1]) ) )
dna_kegg,dna_readable = pathway_info.CancerEnrichment(keep_dna_big.index, np.abs(keep_dna_big.values[:,0]) )
rna_kegg,rna_readable = pathway_info.CancerEnrichment(keep_rna_big.index, np.abs( keep_rna_big.values[:,0]) )
meth_kegg,meth_readable = pathway_info.CancerEnrichment(keep_meth_big.index, np.abs( keep_meth_big.values[:,0] ) )
# dna_kegg_p,dna_readable_p = pathway_info.CancerEnrichment(keep_dna_big.index[pos_dna], (np.abs( np.log2(keep_dna_big.values[pos_dna,1]) )>-np.log2(0.01)).astype(float) )
# rna_kegg_p,rna_readable_p = pathway_info.CancerEnrichment(keep_rna_big.index[pos_rna], (np.abs( np.log2(keep_rna_big.values[pos_rna,1]) )>-np.log2(0.01)).astype(float) )
# meth_kegg_p,meth_readable_p = pathway_info.CancerEnrichment(keep_meth_big.index[pos_meth], (np.abs( np.log2(keep_meth_big.values[pos_meth,1]) )>-np.log2(0.01)).astype(float) )
#
# dna_kegg_n,dna_readable_n = pathway_info.CancerEnrichment(keep_dna_big.index[neg_dna], (np.abs( np.log2(keep_dna_big.values[neg_dna,1]) )>-np.log2(0.01)).astype(float) )
# rna_kegg_n,rna_readable_n = pathway_info.CancerEnrichment(keep_rna_big.index[neg_rna], (np.abs( np.log2(keep_rna_big.values[neg_rna,1]) )>-np.log2(0.01)).astype(float) )
# meth_kegg_n,meth_readable_n = pathway_info.CancerEnrichment(keep_meth_big.index[neg_meth], (np.abs( np.log2(keep_meth_big.values[neg_meth,1]) )>-np.log2(0.01)).astype(float) )
# dna_kegg_p,dna_readable_p = pathway_info.CancerEnrichment(keep_dna_big.index[pos_dna], 1.0-keep_dna_big.values[pos_dna,1] )
# rna_kegg_p,rna_readable_p = pathway_info.CancerEnrichment(keep_rna_big.index[pos_rna], 1.0-keep_rna_big.values[pos_rna,1] )
# meth_kegg_p,meth_readable_p = pathway_info.CancerEnrichment(keep_meth_big.index[pos_meth], 1.0-keep_meth_big.values[pos_meth,1])
#
# dna_kegg_n,dna_readable_n = pathway_info.CancerEnrichment(keep_dna_big.index[neg_dna], 1.0-keep_dna_big.values[neg_dna,1] )
# rna_kegg_n,rna_readable_n = pathway_info.CancerEnrichment(keep_rna_big.index[neg_rna], 1.0-keep_rna_big.values[neg_rna,1] )
# meth_kegg_n,meth_readable_n = pathway_info.CancerEnrichment(keep_meth_big.index[neg_meth], 1.0-keep_meth_big.values[neg_meth,1] )
dna_kegg_p,dna_readable_p = pathway_info.CancerEnrichment(keep_dna_big.index[pos_dna], np.abs( keep_dna_big.values[pos_dna,0] ) )
rna_kegg_p,rna_readable_p = pathway_info.CancerEnrichment(keep_rna_big.index[pos_rna], np.abs( keep_rna_big.values[pos_rna,0]) )
meth_kegg_p,meth_readable_p = pathway_info.CancerEnrichment(keep_meth_big.index[pos_meth], np.abs( keep_meth_big.values[pos_meth,0]))
dna_kegg_n,dna_readable_n = pathway_info.CancerEnrichment(keep_dna_big.index[neg_dna], np.abs( keep_dna_big.values[neg_dna,0] ) )
rna_kegg_n,rna_readable_n = pathway_info.CancerEnrichment(keep_rna_big.index[neg_rna], np.abs( keep_rna_big.values[neg_rna,0] ) )
meth_kegg_n,meth_readable_n = pathway_info.CancerEnrichment(keep_meth_big.index[neg_meth], np.abs( keep_meth_big.values[neg_meth,0]) )
# dna_readable_n=-dna_readable_n
# rna_readable_n=-rna_readable_n
# meth_readable_n=-meth_readable_n
rna_readable.name="rna"
meth_readable.name="meth"
dna_readable.name="dna"
rna_readable_p.name="rna_p"
meth_readable_p.name="meth_p"
dna_readable_p.name="dna_p"
rna_readable_n.name="rna_n"
meth_readable_n.name="meth_n"
dna_readable_n.name="dna_n"
# joined = pd.concat( [rna_readable_p[:20],rna_readable_n[:20],\
# dna_readable_p[:20],dna_readable_n[:20],\
# meth_readable_n[:20],meth_readable_p[:20]], axis=1 )
joined = pd.concat( [rna_readable[:20],\
dna_readable[:20],\
meth_readable[:20]], axis=1 )
maxvalues = joined.index[ np.argsort( -np.abs(joined.fillna(0)).sum(1).values ) ]
#pdb.set_trace()
joined=joined.loc[maxvalues]
joined = joined[:25]
#pathways = joined.index.values
#pathways = pathways[ np.argsort(pathways)]
#joined=joined.loc[pathways]
#br = joined[["rna_p","rna_n"]].plot(kind="bar",ax=ax_pie1,color=["blue","red"],legend=False,stacked=True); br.set_xticklabels([]); ax_pie1.set_ylabel("RNA")
#br = joined[["meth_p","meth_n"]].plot(kind="bar",ax=ax_pie4,color=["blue","red"],legend=False,stacked=True); ax_pie4.set_ylabel("METH")
#br = joined[["dna_p","dna_n"]].plot(kind="bar",ax=ax_pie3,color=["blue","red"],legend=False,stacked=True); br.set_xticklabels([]); ax_pie3.set_ylabel("DNA")
#pdb.set_trace()
br = joined.plot(kind="bar",ax=ax_pie1,color=["red","green","blue"],legend=True,stacked=True, sort_columns=False); # ax_pie1.legend(["RNA","DNA","METH"])
#br = joined[["meth_p","meth_n"]].plot(kind="bar",ax=ax_pie4,color=["blue","red"],legend=False,stacked=True); ax_pie4.set_ylabel("METH")
#br = joined[["dna_p","dna_n"]].plot(kind="bar",ax=ax_pie3,color=["blue","red"],legend=False,stacked=True); br.set_xticklabels([]); ax_pie3.set_ylabel("DNA")
#joined[["rna_n","meth_n","dna_n"]].plot(kind="bar",ax=ax_pie1,color="red")
# if len(rna_readable_p)>0:
# rna_readable_p[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="green" )
# if len(rna_readable_n)>0:
# rna_readable_n[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="red" )
#
# if len(meth_readable_p)>0:
# meth_readable_p[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="blue" )
# if len(meth_readable_n)>0:
# meth_readable_n[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="purple" )
#
# if len(dna_readable_p)>0:
# dna_readable_p[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="yellow" )
# if len(dna_readable_n)>0:
# dna_readable_n[:12].plot( kind="barh",ax=ax_pie1, fontsize=8, color="black" )
#
# #rna_readable[:12].plot.pie( ax=ax_pie1, fontsize=8 )
# if len(meth_readable)>0:
# #meth_readable[:12].plot.pie( ax=ax_pie3, fontsize =8 )
# if len(dna_readable)>0:
#    dna_readable[:12].plot.pie( ax=ax_pie4, fontsize=8 )
#pp.show()
#pdb.set_trace()
#assert False
#print "normaize over meth and rna anr mirna"
#print "include dna"
#print "up and down pie charts"
#print "add other pathways if not in cancer"
#print "put counts in pies"
print "survival: best per cohort, also double sided on third, go to fifth if enough events"
#pp.show()
#pdb.set_trace()
#f.suptitle( "z %d"%(z_idx) );
#f.subplots_adjust(bottom=0.25);
pp.savefig( z_dir + "/z%d.png"%(z_idx), format="png", dpi=300 )
#print h
pp.close('all')
#pdb.set_trace()
#kept_rna = pd.DataFrame( rna_sorted[keep_rna.index], index=rna_sorted.index, columns = keep_rna.index )
#kept_mirna = pd.DataFrame( mirna_sorted[keep_mirna.index], index=mirna_sorted.index, columns = keep_mirna.index )
#kept_meth = pd.DataFrame( meth_sorted[keep_meth.index], index=meth_sorted.index, columns = keep_meth.index )
merged_rna = pd.concat(Z_keep_rna,axis=1)
merged_mirna = pd.concat(Z_keep_mirna,axis=1)
merged_meth = pd.concat(Z_keep_meth,axis=1)
merged_rna.to_csv( save_dir + "/z_to_rna.csv" )
merged_mirna.to_csv( save_dir + "/z_to_mirna.csv" )
merged_meth.to_csv( save_dir + "/z_to_meth.csv" )
f = sns.clustermap(merged_rna.fillna(0), figsize=(8,6))
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), fontsize=8)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), fontsize=8)
pp.savefig( save_dir + "/clustermap_z_to_rna.png", format="png", dpi=300 )
f = sns.clustermap(merged_mirna.fillna(0), figsize=(8,6))
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), fontsize=8)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), fontsize=8)
pp.savefig( save_dir + "/clustermap_z_to_mirna.png", format="png", dpi=300 )
f = sns.clustermap(merged_meth.fillna(0), figsize=(8,6))
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
pp.setp(f.ax_heatmap.yaxis.get_majorticklabels(), fontsize=8)
pp.setp(f.ax_heatmap.xaxis.get_majorticklabels(), fontsize=8)
pp.savefig( save_dir + "/clustermap_z_to_meth.png", format="png", dpi=300 )
#pdb.set_trace()
#pdb.set_trace()
#
# binses = [20,50,100,500]
# for bins in binses:
# pp.figure()
# pp.hist( aucs_true.values.flatten(), bins, range=(0,1), normed=True, histtype="step", lw=3, label="True" )
# pp.hist( aucs_random.values.flatten(), bins, color="red",range=(0,1), normed=True, histtype="step", lw=3, label="Random" )
# #pp.plot( [0,1.0],[0.5,0.5], 'r-', lw=3)
# pp.legend()
# pp.xlabel("Area Under the ROC")
# pp.ylabel("Pr(AUC)")
# pp.title("Comparison between AUC using latent space and random")
# pp.savefig( tissue_dir + "/auc_comparison_%dbins.png"%(bins), format='png', dpi=300 )
#
# pp.close('all')
#pdb.set_trace()
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
main( data_location, results_location )
|
mit
|
jniediek/mne-python
|
mne/surface.py
|
5
|
45614
|
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
from os import path as op
import sys
from struct import pack
from glob import glob
from distutils.version import LooseVersion
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, eye as speye
from .bem import read_bem_surfaces
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tree import dir_tree_find
from .io.tag import find_tag
from .io.write import (write_int, start_file, end_block, start_block, end_file,
write_string, write_float_sparse_rcs)
from .channels.channels import _get_meg_system
from .transforms import transform_surface_to
from .utils import logger, verbose, get_subjects_dir, warn
from .externals.six import string_types
from .fixes import _read_volume_info, _serialize_volume_info
###############################################################################
# AUTOMATED SURFACE FINDING
@verbose
def get_head_surf(subject, source=('bem', 'head'), subjects_dir=None,
verbose=None):
"""Load the subject head surface
Parameters
----------
subject : str
Subject name.
source : str | list of str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory by going
through all files matching the pattern. The head surface will be read
from the first file containing a head surface. Can also be a list
to try multiple strings.
subjects_dir : str, or None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surf : dict
The head surface.
"""
# Load the head surface from the BEM
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if not isinstance(subject, string_types):
        raise TypeError('subject must be a string, not %s' % (type(subject),))
# use realpath to allow for linked surfaces (c.f. MNE manual 196-197)
if isinstance(source, string_types):
source = [source]
surf = None
for this_source in source:
this_head = op.realpath(op.join(subjects_dir, subject, 'bem',
'%s-%s.fif' % (subject, this_source)))
if op.exists(this_head):
surf = read_bem_surfaces(this_head, True,
FIFF.FIFFV_BEM_SURF_ID_HEAD,
verbose=False)
else:
# let's do a more sophisticated search
path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(path):
raise IOError('Subject bem directory "%s" does not exist'
% path)
files = sorted(glob(op.join(path, '%s*%s.fif'
% (subject, this_source))))
for this_head in files:
try:
surf = read_bem_surfaces(this_head, True,
FIFF.FIFFV_BEM_SURF_ID_HEAD,
verbose=False)
except ValueError:
pass
else:
break
if surf is not None:
break
if surf is None:
raise IOError('No file matching "%s*%s" and containing a head '
'surface found' % (subject, this_source))
logger.info('Using surface from %s' % this_head)
return surf
@verbose
def get_meg_helmet_surf(info, trans=None, verbose=None):
"""Load the MEG helmet associated with the MEG sensors
Parameters
----------
info : instance of Info
Measurement info.
trans : dict
The head<->MRI transformation, usually obtained using
read_trans(). Can be None, in which case the surface will
be in head coordinates instead of MRI coordinates.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surf : dict
The MEG helmet as a surface.
"""
system = _get_meg_system(info)
logger.info('Getting helmet for system %s' % system)
fname = op.join(op.split(__file__)[0], 'data', 'helmets',
system + '.fif.gz')
surf = read_bem_surfaces(fname, False, FIFF.FIFFV_MNE_SURF_MEG_HELMET,
verbose=False)
# Ignore what the file says, it's in device coords and we want MRI coords
surf['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
transform_surface_to(surf, 'head', info['dev_head_t'])
if trans is not None:
transform_surface_to(surf, 'mri', trans)
return surf
###############################################################################
# EFFICIENCY UTILITIES
def fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
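# Illustrative sketch (comment only; the arrays below are example assumptions,
# not values used elsewhere in this module): fast_cross_3d broadcasts like
# np.cross, so per-triangle normals come from a single call.
#
#     rr = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
#     tris = np.array([[0, 1, 2], [0, 2, 3]])
#     tri_nn = fast_cross_3d(rr[tris[:, 1]] - rr[tris[:, 0]],
#                            rr[tris[:, 2]] - rr[tris[:, 0]])
#     # tri_nn -> [[0, 0, 1], [1, 0, 0]]  (unnormalized triangle normals)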
def _fast_cross_nd_sum(a, b, c):
"""Fast cross and sum"""
return ((a[..., 1] * b[..., 2] - a[..., 2] * b[..., 1]) * c[..., 0] +
(a[..., 2] * b[..., 0] - a[..., 0] * b[..., 2]) * c[..., 1] +
(a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]) * c[..., 2])
def _accumulate_normals(tris, tri_nn, npts):
"""Efficiently accumulate triangle normals"""
# this code replaces the following, but is faster (vectorized):
#
# this['nn'] = np.zeros((this['np'], 3))
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['nn'][verts, :] += this['tri_nn'][p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts, weights=tri_nn[:, idx],
minlength=npts)
return nn
def _triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = [np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])]
return neighbor_tri
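# Illustrative sketch (comment only; the two-triangle mesh is an example
# assumption): each vertex maps to the indices of the triangles touching it.
#
#     tris = np.array([[0, 1, 2], [0, 2, 3]])
#     _triangle_neighbors(tris, npts=4)
#     # -> [array([0, 1]), array([0]), array([0, 1]), array([1])]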
def _triangle_coords(r, geom, best):
"""Get coordinates of a vertex projected to a triangle"""
r1 = geom['r1'][best]
tri_nn = geom['nn'][best]
r12 = geom['r12'][best]
r13 = geom['r13'][best]
a = geom['a'][best]
b = geom['b'][best]
c = geom['c'][best]
rr = r - r1
z = np.sum(rr * tri_nn)
v1 = np.sum(rr * r12)
v2 = np.sum(rr * r13)
det = a * b - c * c
x = (b * v1 - c * v2) / det
y = (a * v2 - c * v1) / det
return x, y, z
@verbose
def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
"""Complete surface info"""
# based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
# Main triangulation [mne_add_triangle_data()]
this['tri_area'] = np.zeros(this['ntri'])
r1 = this['rr'][this['tris'][:, 0], :]
r2 = this['rr'][this['tris'][:, 1], :]
r3 = this['rr'][this['tris'][:, 2], :]
this['tri_cent'] = (r1 + r2 + r3) / 3.0
this['tri_nn'] = fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(this['tri_nn'] ** 2, axis=1))
this['tri_area'] = size / 2.0
zidx = np.where(size == 0)[0]
for idx in zidx:
logger.info(' Warning: zero size triangle # %s' % idx)
size[zidx] = 1.0 # prevent ugly divide-by-zero
this['tri_nn'] /= size[:, None]
# Find neighboring triangles, accumulate vertex normals, normalize
logger.info(' Triangle neighbors and vertex normals...')
this['neighbor_tri'] = _triangle_neighbors(this['tris'], this['np'])
this['nn'] = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
_normalize_vectors(this['nn'])
# Check for topological defects
idx = np.where([len(n) == 0 for n in this['neighbor_tri']])[0]
if len(idx) > 0:
        logger.info('    Vertices [%s] do not have any neighboring '
'triangles!' % ','.join([str(ii) for ii in idx]))
idx = np.where([len(n) < 3 for n in this['neighbor_tri']])[0]
if len(idx) > 0:
logger.info(' Vertices [%s] have fewer than three neighboring '
'tris, omitted' % ','.join([str(ii) for ii in idx]))
for k in idx:
            this['neighbor_tri'][k] = np.array([], int)
# Determine the neighboring vertices and fix errors
if do_neighbor_vert is True:
logger.info(' Vertex neighbors...')
this['neighbor_vert'] = [_get_surf_neighbors(this, k)
for k in range(this['np'])]
return this
def _get_surf_neighbors(surf, k):
"""Calculate the surface neighbors based on triangulation"""
verts = surf['tris'][surf['neighbor_tri'][k]]
verts = np.setdiff1d(verts, [k], assume_unique=False)
assert np.all(verts < surf['np'])
nneighbors = len(verts)
nneigh_max = len(surf['neighbor_tri'][k])
if nneighbors > nneigh_max:
raise RuntimeError('Too many neighbors for vertex %d' % k)
elif nneighbors != nneigh_max:
logger.info(' Incorrect number of distinct neighbors for vertex'
' %d (%d instead of %d) [fixed].' % (k, nneighbors,
nneigh_max))
return verts
def _normalize_vectors(rr):
"""Normalize surface vertices"""
size = np.sqrt(np.sum(rr * rr, axis=1))
size[size == 0] = 1.0 # avoid divide-by-zero
rr /= size[:, np.newaxis] # operate in-place
def _compute_nearest(xhs, rr, use_balltree=True, return_dists=False):
"""Find nearest neighbors
Note: The rows in xhs and rr must all be unit-length vectors, otherwise
the result will be incorrect.
Parameters
----------
xhs : array, shape=(n_samples, n_dim)
Points of data set.
rr : array, shape=(n_query, n_dim)
Points to find nearest neighbors for.
use_balltree : bool
Use fast BallTree based search from scikit-learn. If scikit-learn
is not installed it will fall back to the slow brute force search.
return_dists : bool
If True, return associated distances.
Returns
-------
nearest : array, shape=(n_query,)
Index of nearest neighbor in xhs for every point in rr.
distances : array, shape=(n_query,)
The distances. Only returned if return_dists is True.
"""
if use_balltree:
try:
from sklearn.neighbors import BallTree
except ImportError:
logger.info('Nearest-neighbor searches will be significantly '
'faster if scikit-learn is installed.')
use_balltree = False
if xhs.size == 0 or rr.size == 0:
if return_dists:
return np.array([], int), np.array([])
return np.array([], int)
if use_balltree is True:
ball_tree = BallTree(xhs)
if return_dists:
out = ball_tree.query(rr, k=1, return_distance=True)
return out[1][:, 0], out[0][:, 0]
else:
nearest = ball_tree.query(rr, k=1, return_distance=False)[:, 0]
return nearest
else:
from scipy.spatial.distance import cdist
if return_dists:
nearest = list()
dists = list()
for r in rr:
d = cdist(r[np.newaxis, :], xhs)
idx = np.argmin(d)
nearest.append(idx)
dists.append(d[0, idx])
return (np.array(nearest), np.array(dists))
else:
nearest = np.array([np.argmin(cdist(r[np.newaxis, :], xhs))
for r in rr])
return nearest
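# Illustrative sketch (comment only; the points are example assumptions).
# Both sets must already be unit vectors, as noted above; the result indexes
# rows of ``xhs``.
#
#     xhs = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
#     rr = np.array([[0.9, 0.1, 0.], [0., 0.2, 0.9]])
#     rr /= np.sqrt(np.sum(rr ** 2, axis=1))[:, np.newaxis]
#     _compute_nearest(xhs, rr)  # -> array([0, 2])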
###############################################################################
# Handle freesurfer
def _fread3(fobj):
"""Docstring"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
return (b1 << 16) + (b2 << 8) + b3
def _fread3_many(fobj, n):
"""Read 3-byte ints from an open binary file object."""
b1, b2, b3 = np.fromfile(fobj, ">u1",
3 * n).reshape(-1, 3).astype(np.int).T
return (b1 << 16) + (b2 << 8) + b3
def read_curvature(filepath):
"""Load in curavature values from the ?h.curv file."""
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic == 16777215:
vnum = np.fromfile(fobj, ">i4", 3)[0]
curv = np.fromfile(fobj, ">f4", vnum)
else:
vnum = magic
_fread3(fobj)
curv = np.fromfile(fobj, ">i2", vnum) / 100
bin_curv = 1 - np.array(curv != 0, np.int)
return bin_curv
@verbose
def read_surface(fname, read_metadata=False, verbose=None):
"""Load a Freesurfer surface mesh in triangular format
Parameters
----------
fname : str
The name of the file containing the surface.
read_metadata : bool
Read metadata as key-value pairs.
Valid keys:
* 'head' : array of int
* 'valid' : str
* 'filename' : str
* 'volume' : array of int, shape (3,)
* 'voxelsize' : array of float, shape (3,)
* 'xras' : array of float, shape (3,)
* 'yras' : array of float, shape (3,)
* 'zras' : array of float, shape (3,)
* 'cras' : array of float, shape (3,)
.. versionadded:: 0.13.0
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
rr : array, shape=(n_vertices, 3)
Coordinate points.
tris : int array, shape=(n_faces, 3)
Triangulation (each line contains indices for three points which
together form a face).
volume_info : dict-like
If read_metadata is true, key-value pairs found in the geometry file.
See Also
--------
write_surface
read_tri
"""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel and LooseVersion(nib.__version__) > LooseVersion('2.1.0'):
return nib.freesurfer.read_geometry(fname, read_metadata=read_metadata)
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(fname, "rb", buffering=0) as fobj: # buffering=0 for np bug
magic = _fread3(fobj)
# Quad file or new quad
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):
create_stamp = ''
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
# Face splitting follows
faces = np.zeros((2 * nquad, 3), dtype=np.int)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface:nface + 2] = [[quad[0], quad[1], quad[3]],
[quad[2], quad[3], quad[1]]]
else:
faces[nface:nface + 2] = [[quad[0], quad[1], quad[2]],
[quad[0], quad[2], quad[3]]]
nface += 2
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline()
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("%s does not appear to be a Freesurfer surface"
% fname)
logger.info('Triangle file: %s nvert = %s ntri = %s'
% (create_stamp.strip(), len(coords), len(faces)))
coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warn('No volume information contained in the file')
ret += (volume_info,)
return ret
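# Illustrative sketch (comment only; the path is an assumption, not a file
# shipped with this module): reading a FreeSurfer surface with and without
# the optional volume metadata.
#
#     rr, tris = read_surface('/path/to/subjects/sample/surf/lh.white')
#     rr, tris, info = read_surface('/path/to/subjects/sample/surf/lh.white',
#                                   read_metadata=True)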
@verbose
def _read_surface_geom(fname, patch_stats=True, norm_rr=False,
read_metadata=False, verbose=None):
"""Load the surface as dict, optionally add the geometry information"""
# based on mne_load_surface_geom() in mne_surface_io.c
if isinstance(fname, string_types):
ret = read_surface(fname, read_metadata=read_metadata)
nvert = len(ret[0])
ntri = len(ret[1])
s = dict(rr=ret[0], tris=ret[1], use_tris=ret[1], ntri=ntri, np=nvert)
elif isinstance(fname, dict):
s = fname
else:
raise RuntimeError('fname cannot be understood as str or dict')
if patch_stats is True:
s = _complete_surface_info(s)
if norm_rr is True:
_normalize_vectors(s['rr'])
if read_metadata:
return s, ret[2]
return s
##############################################################################
# SURFACE CREATION
def _get_ico_surface(grade, patch_stats=False):
"""Return an icosahedral surface of the desired grade"""
# always use verbose=False since users don't need to know we're pulling
# these from a file
ico_file_name = op.join(op.dirname(__file__), 'data',
'icos.fif.gz')
ico = read_bem_surfaces(ico_file_name, patch_stats, s_id=9000 + grade,
verbose=False)
return ico
def _tessellate_sphere_surf(level, rad=1.0):
"""Return a surface structure instead of the details"""
rr, tris = _tessellate_sphere(level)
npt = len(rr) # called "npt" instead of "np" because of numpy...
ntri = len(tris)
nn = rr.copy()
rr *= rad
    s = dict(rr=rr, np=npt, tris=tris, use_tris=tris, ntri=ntri, nuse=npt,
nn=nn, inuse=np.ones(npt, int))
return s
def _norm_midpt(ai, bi, rr):
a = np.array([rr[aii] for aii in ai])
b = np.array([rr[bii] for bii in bi])
c = (a + b) / 2.
return c / np.sqrt(np.sum(c ** 2, 1))[:, np.newaxis]
def _tessellate_sphere(mylevel):
"""Create a tessellation of a unit sphere"""
# Vertices of a unit octahedron
rr = np.array([[1, 0, 0], [-1, 0, 0], # xplus, xminus
[0, 1, 0], [0, -1, 0], # yplus, yminus
[0, 0, 1], [0, 0, -1]], float) # zplus, zminus
tris = np.array([[0, 4, 2], [2, 4, 1], [1, 4, 3], [3, 4, 0],
[0, 2, 5], [2, 1, 5], [1, 3, 5], [3, 0, 5]], int)
# A unit octahedron
if mylevel < 1:
raise ValueError('# of levels must be >= 1')
# Reverse order of points in each triangle
# for counter-clockwise ordering
tris = tris[:, [2, 1, 0]]
# Subdivide each starting triangle (mylevel - 1) times
for _ in range(1, mylevel):
"""
Subdivide each triangle in the old approximation and normalize
the new points thus generated to lie on the surface of the unit
sphere.
Each input triangle with vertices labelled [0,1,2] as shown
below will be turned into four new triangles:
Make new points
a = (0+2)/2
b = (0+1)/2
c = (1+2)/2
1
/\ Normalize a, b, c
/ \
b/____\c Construct new triangles
/\ /\ [0,b,a]
/ \ / \ [b,1,c]
/____\/____\ [a,b,c]
0 a 2 [a,c,2]
"""
# use new method: first make new points (rr)
a = _norm_midpt(tris[:, 0], tris[:, 2], rr)
b = _norm_midpt(tris[:, 0], tris[:, 1], rr)
c = _norm_midpt(tris[:, 1], tris[:, 2], rr)
lims = np.cumsum([len(rr), len(a), len(b), len(c)])
aidx = np.arange(lims[0], lims[1])
bidx = np.arange(lims[1], lims[2])
cidx = np.arange(lims[2], lims[3])
rr = np.concatenate((rr, a, b, c))
# now that we have our points, make new triangle definitions
tris = np.array((np.c_[tris[:, 0], bidx, aidx],
np.c_[bidx, tris[:, 1], cidx],
np.c_[aidx, bidx, cidx],
np.c_[aidx, cidx, tris[:, 2]]), int).swapaxes(0, 1)
tris = np.reshape(tris, (np.prod(tris.shape[:2]), 3))
# Copy the resulting approximation into standard table
rr_orig = rr
rr = np.empty_like(rr)
nnode = 0
for k, tri in enumerate(tris):
for j in range(3):
coord = rr_orig[tri[j]]
# this is faster than cdist (no need for sqrt)
similarity = np.dot(rr[:nnode], coord)
idx = np.where(similarity > 0.99999)[0]
if len(idx) > 0:
tris[k, j] = idx[0]
else:
rr[nnode] = coord
tris[k, j] = nnode
nnode += 1
rr = rr[:nnode].copy()
return rr, tris
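# Illustrative sketch (comment only): every subdivision level multiplies the
# octahedron's 8 triangles by four, so level 1 returns the bare octahedron
# and level 3 returns 8 * 4 ** 2 == 128 triangles, all vertices unit-norm.
#
#     rr, tris = _tessellate_sphere(3)
#     # len(tris) == 128 and np.allclose(np.linalg.norm(rr, axis=1), 1.)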
def _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
subjects_dir):
"""Load a surf and use the subdivided icosahedron to get points"""
# Based on load_source_space_surf_spacing() in load_source_space.c
surf = _read_surface_geom(surf)
if stype in ['ico', 'oct']:
# ## from mne_ico_downsample.c ## #
surf_name = op.join(subjects_dir, subject, 'surf', hemi + '.sphere')
logger.info('Loading geometry from %s...' % surf_name)
from_surf = _read_surface_geom(surf_name, norm_rr=True,
patch_stats=False)
if not len(from_surf['rr']) == surf['np']:
raise RuntimeError('Mismatch between number of surface vertices, '
'possible parcellation error?')
_normalize_vectors(ico_surf['rr'])
# Make the maps
logger.info('Mapping %s %s -> %s (%d) ...'
% (hemi, subject, stype, sval))
mmap = _compute_nearest(from_surf['rr'], ico_surf['rr'])
nmap = len(mmap)
surf['inuse'] = np.zeros(surf['np'], int)
for k in range(nmap):
if surf['inuse'][mmap[k]]:
# Try the nearest neighbors
neigh = _get_surf_neighbors(surf, mmap[k])
was = mmap[k]
inds = np.where(np.logical_not(surf['inuse'][neigh]))[0]
if len(inds) == 0:
raise RuntimeError('Could not find neighbor for vertex '
'%d / %d' % (k, nmap))
else:
mmap[k] = neigh[inds[-1]]
logger.info(' Source space vertex moved from %d to %d '
'because of double occupation', was, mmap[k])
elif mmap[k] < 0 or mmap[k] > surf['np']:
raise RuntimeError('Map number out of range (%d), this is '
'probably due to inconsistent surfaces. '
'Parts of the FreeSurfer reconstruction '
'need to be redone.' % mmap[k])
surf['inuse'][mmap[k]] = True
logger.info('Setting up the triangulation for the decimated '
'surface...')
surf['use_tris'] = np.array([mmap[ist] for ist in ico_surf['tris']],
np.int32)
else: # use_all is True
surf['inuse'] = np.ones(surf['np'], int)
surf['use_tris'] = None
if surf['use_tris'] is not None:
surf['nuse_tri'] = len(surf['use_tris'])
else:
surf['nuse_tri'] = 0
surf['nuse'] = np.sum(surf['inuse'])
surf['vertno'] = np.where(surf['inuse'])[0]
# set some final params
inds = np.arange(surf['np'])
sizes = np.sqrt(np.sum(surf['nn'] ** 2, axis=1))
surf['nn'][inds] = surf['nn'][inds] / sizes[:, np.newaxis]
surf['inuse'][sizes <= 0] = False
surf['nuse'] = np.sum(surf['inuse'])
surf['subject_his_id'] = subject
return surf
def write_surface(fname, coords, faces, create_stamp='', volume_info=None):
"""Write a triangular Freesurfer surface mesh
Accepts the same data format as is returned by read_surface().
Parameters
----------
fname : str
File to write.
coords : array, shape=(n_vertices, 3)
Coordinate points.
faces : int array, shape=(n_faces, 3)
Triangulation (each line contains indices for three points which
together form a face).
create_stamp : str
Comment that is written to the beginning of the file. Can not contain
line breaks.
volume_info : dict-like or None
Key-value pairs to encode at the end of the file.
Valid keys:
* 'head' : array of int
* 'valid' : str
* 'filename' : str
* 'volume' : array of int, shape (3,)
* 'voxelsize' : array of float, shape (3,)
* 'xras' : array of float, shape (3,)
* 'yras' : array of float, shape (3,)
* 'zras' : array of float, shape (3,)
* 'cras' : array of float, shape (3,)
.. versionadded:: 0.13.0
See Also
--------
read_surface
read_tri
"""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel and LooseVersion(nib.__version__) > LooseVersion('2.1.0'):
nib.freesurfer.io.write_geometry(fname, coords, faces,
create_stamp=create_stamp,
volume_info=volume_info)
return
if len(create_stamp.splitlines()) > 1:
raise ValueError("create_stamp can only contain one line")
with open(fname, 'wb') as fid:
fid.write(pack('>3B', 255, 255, 254))
strs = ['%s\n' % create_stamp, '\n']
strs = [s.encode('utf-8') for s in strs]
fid.writelines(strs)
vnum = len(coords)
fnum = len(faces)
fid.write(pack('>2i', vnum, fnum))
fid.write(np.array(coords, dtype='>f4').tostring())
fid.write(np.array(faces, dtype='>i4').tostring())
# Add volume info, if given
if volume_info is not None and len(volume_info) > 0:
fid.write(_serialize_volume_info(volume_info))
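# Illustrative sketch (comment only; the filename and create_stamp are
# assumptions): a write/read round trip preserves the coordinates and
# triangulation returned by read_surface.
#
#     write_surface('lh.example', coords, faces, create_stamp='example stamp')
#     coords2, faces2 = read_surface('lh.example')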
###############################################################################
# Decimation
def _decimate_surface(points, triangles, reduction):
"""Aux function"""
if 'DISPLAY' not in os.environ and sys.platform != 'win32':
os.environ['ETS_TOOLKIT'] = 'null'
try:
from tvtk.api import tvtk
from tvtk.common import configure_input
except ImportError:
raise ValueError('This function requires the TVTK package to be '
'installed')
if triangles.max() > len(points) - 1:
raise ValueError('The triangles refer to undefined points. '
'Please check your mesh.')
src = tvtk.PolyData(points=points, polys=triangles)
decimate = tvtk.QuadricDecimation(target_reduction=reduction)
configure_input(decimate, src)
decimate.update()
out = decimate.output
tris = out.polys.to_array()
# n-tuples + interleaved n-next -- reshape trick
    return out.points.to_array(), tris.reshape(tris.size // 4, 4)[:, 1:]
def decimate_surface(points, triangles, n_triangles):
""" Decimate surface data
Note. Requires TVTK to be installed for this to function.
    Note. If an odd target number was requested,
the ``quadric decimation`` algorithm used results in the
next even number of triangles. For example a reduction request to 30001
triangles will result in 30000 triangles.
Parameters
----------
    points : ndarray
        The surface to be decimated, an array of shape (n_points, 3).
    triangles : ndarray
        The triangulation of the surface, an array of shape (n_triangles, 3).
n_triangles : int
The desired number of triangles.
Returns
-------
points : ndarray
The decimated points.
triangles : ndarray
The decimated triangles.
"""
reduction = 1 - (float(n_triangles) / len(triangles))
return _decimate_surface(points, triangles, reduction)
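# Illustrative sketch (comment only; requires TVTK as noted above): decimate
# a dense tessellated sphere down to roughly 1000 triangles.
#
#     surf = _tessellate_sphere_surf(5)  # 8 * 4 ** 4 == 2048 triangles
#     points, triangles = decimate_surface(surf['rr'], surf['tris'],
#                                          n_triangles=1000)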
###############################################################################
# Morph maps
@verbose
def read_morph_map(subject_from, subject_to, subjects_dir=None,
verbose=None):
"""Read morph map
Morph maps can be generated with mne_make_morph_maps. If one isn't
available, it will be generated automatically and saved to the
    ``subjects_dir/morph-maps`` directory.
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
subjects_dir : string
        Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
left_map, right_map : sparse matrix
The morph maps for the 2 hemispheres.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# First check for morph-map dir existence
mmap_dir = op.join(subjects_dir, 'morph-maps')
if not op.isdir(mmap_dir):
try:
os.mkdir(mmap_dir)
except Exception:
warn('Could not find or make morph map directory "%s"' % mmap_dir)
# Does the file exist
fname = op.join(mmap_dir, '%s-%s-morph.fif' % (subject_from, subject_to))
if not op.exists(fname):
fname = op.join(mmap_dir, '%s-%s-morph.fif'
% (subject_to, subject_from))
if not op.exists(fname):
warn('Morph map "%s" does not exist, creating it and saving it to '
'disk (this may take a few minutes)' % fname)
logger.info('Creating morph map %s -> %s'
% (subject_from, subject_to))
mmap_1 = _make_morph_map(subject_from, subject_to, subjects_dir)
logger.info('Creating morph map %s -> %s'
% (subject_to, subject_from))
mmap_2 = _make_morph_map(subject_to, subject_from, subjects_dir)
try:
_write_morph_map(fname, subject_from, subject_to,
mmap_1, mmap_2)
except Exception as exp:
warn('Could not write morph-map file "%s" (error: %s)'
% (fname, exp))
return mmap_1
f, tree, _ = fiff_open(fname)
with f as fid:
# Locate all maps
maps = dir_tree_find(tree, FIFF.FIFFB_MNE_MORPH_MAP)
if len(maps) == 0:
raise ValueError('Morphing map data not found')
# Find the correct ones
left_map = None
right_map = None
for m in maps:
tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_FROM)
if tag.data == subject_from:
tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP_TO)
if tag.data == subject_to:
                    # Names match: which hemisphere is this?
tag = find_tag(fid, m, FIFF.FIFF_MNE_HEMI)
if tag.data == FIFF.FIFFV_MNE_SURF_LEFT_HEMI:
tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
left_map = tag.data
logger.info(' Left-hemisphere map read.')
elif tag.data == FIFF.FIFFV_MNE_SURF_RIGHT_HEMI:
tag = find_tag(fid, m, FIFF.FIFF_MNE_MORPH_MAP)
right_map = tag.data
logger.info(' Right-hemisphere map read.')
if left_map is None or right_map is None:
raise ValueError('Could not find both hemispheres in %s' % fname)
return left_map, right_map
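# Illustrative sketch (comment only; the subject names are assumptions): the
# returned maps are sparse (n_to, n_from) matrices, so per-vertex data defined
# on ``subject_from`` morphs to ``subject_to`` with a sparse matrix product.
#
#     left_map, right_map = read_morph_map('sample', 'fsaverage')
#     # data_lh: one value per left-hemisphere vertex of 'sample'
#     # morphed_lh = left_map * data_lh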
def _write_morph_map(fname, subject_from, subject_to, mmap_1, mmap_2):
"""Write a morph map to disk"""
fid = start_file(fname)
assert len(mmap_1) == 2
assert len(mmap_2) == 2
hemis = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
for m, hemi in zip(mmap_1, hemis):
start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_from)
write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_to)
write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
for m, hemi in zip(mmap_2, hemis):
start_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_FROM, subject_to)
write_string(fid, FIFF.FIFF_MNE_MORPH_MAP_TO, subject_from)
write_int(fid, FIFF.FIFF_MNE_HEMI, hemi)
write_float_sparse_rcs(fid, FIFF.FIFF_MNE_MORPH_MAP, m)
end_block(fid, FIFF.FIFFB_MNE_MORPH_MAP)
end_file(fid)
def _get_tri_dist(p, q, p0, q0, a, b, c, dist):
"""Auxiliary function for getting the distance to a triangle edge"""
return np.sqrt((p - p0) * (p - p0) * a +
(q - q0) * (q - q0) * b +
(p - p0) * (q - q0) * c +
dist * dist)
def _get_tri_supp_geom(tris, rr):
"""Create supplementary geometry information using tris and rrs"""
r1 = rr[tris[:, 0], :]
r12 = rr[tris[:, 1], :] - r1
r13 = rr[tris[:, 2], :] - r1
r1213 = np.array([r12, r13]).swapaxes(0, 1)
a = np.sum(r12 * r12, axis=1)
b = np.sum(r13 * r13, axis=1)
c = np.sum(r12 * r13, axis=1)
mat = np.rollaxis(np.array([[b, -c], [-c, a]]), 2)
mat /= (a * b - c * c)[:, np.newaxis, np.newaxis]
nn = fast_cross_3d(r12, r13)
_normalize_vectors(nn)
return dict(r1=r1, r12=r12, r13=r13, r1213=r1213,
a=a, b=b, c=c, mat=mat, nn=nn)
@verbose
def _make_morph_map(subject_from, subject_to, subjects_dir=None):
"""Construct morph map from one subject to another
Note that this is close, but not exactly like the C version.
For example, parts are more accurate due to double precision,
so expect some small morph-map differences!
Note: This seems easily parallelizable, but the overhead
of pickling all the data structures makes it less efficient
than just running on a single core :(
"""
subjects_dir = get_subjects_dir(subjects_dir)
morph_maps = list()
# add speedy short-circuit for self-maps
if subject_from == subject_to:
for hemi in ['lh', 'rh']:
fname = op.join(subjects_dir, subject_from, 'surf',
'%s.sphere.reg' % hemi)
from_pts = read_surface(fname, verbose=False)[0]
n_pts = len(from_pts)
morph_maps.append(speye(n_pts, n_pts, format='csr'))
return morph_maps
for hemi in ['lh', 'rh']:
# load surfaces and normalize points to be on unit sphere
fname = op.join(subjects_dir, subject_from, 'surf',
'%s.sphere.reg' % hemi)
from_pts, from_tris = read_surface(fname, verbose=False)
n_from_pts = len(from_pts)
_normalize_vectors(from_pts)
tri_geom = _get_tri_supp_geom(from_tris, from_pts)
fname = op.join(subjects_dir, subject_to, 'surf',
'%s.sphere.reg' % hemi)
to_pts = read_surface(fname, verbose=False)[0]
n_to_pts = len(to_pts)
_normalize_vectors(to_pts)
# from surface: get nearest neighbors, find triangles for each vertex
nn_pts_idx = _compute_nearest(from_pts, to_pts)
from_pt_tris = _triangle_neighbors(from_tris, len(from_pts))
from_pt_tris = [from_pt_tris[pt_idx] for pt_idx in nn_pts_idx]
# find triangle in which point lies and assoc. weights
nn_tri_inds = []
nn_tris_weights = []
for pt_tris, to_pt in zip(from_pt_tris, to_pts):
p, q, idx, dist = _find_nearest_tri_pt(pt_tris, to_pt, tri_geom)
nn_tri_inds.append(idx)
nn_tris_weights.extend([1. - (p + q), p, q])
nn_tris = from_tris[nn_tri_inds]
row_ind = np.repeat(np.arange(n_to_pts), 3)
this_map = csr_matrix((nn_tris_weights, (row_ind, nn_tris.ravel())),
shape=(n_to_pts, n_from_pts))
morph_maps.append(this_map)
return morph_maps
def _find_nearest_tri_pt(pt_tris, to_pt, tri_geom, run_all=False):
"""Find nearest point mapping to a set of triangles
If run_all is False, if the point lies within a triangle, it stops.
If run_all is True, edges of other triangles are checked in case
those (somehow) are closer.
"""
# The following dense code is equivalent to the following:
# rr = r1[pt_tris] - to_pts[ii]
# v1s = np.sum(rr * r12[pt_tris], axis=1)
# v2s = np.sum(rr * r13[pt_tris], axis=1)
# aas = a[pt_tris]
# bbs = b[pt_tris]
# ccs = c[pt_tris]
# dets = aas * bbs - ccs * ccs
# pp = (bbs * v1s - ccs * v2s) / dets
# qq = (aas * v2s - ccs * v1s) / dets
# pqs = np.array(pp, qq)
# This einsum is equivalent to doing:
# pqs = np.array([np.dot(x, y) for x, y in zip(r1213, r1-to_pt)])
r1 = tri_geom['r1'][pt_tris]
rrs = to_pt - r1
tri_nn = tri_geom['nn'][pt_tris]
vect = np.einsum('ijk,ik->ij', tri_geom['r1213'][pt_tris], rrs)
mats = tri_geom['mat'][pt_tris]
# This einsum is equivalent to doing:
# pqs = np.array([np.dot(m, v) for m, v in zip(mats, vect)]).T
pqs = np.einsum('ijk,ik->ji', mats, vect)
found = False
dists = np.sum(rrs * tri_nn, axis=1)
# There can be multiple (sadness), find closest
idx = np.where(np.all(pqs >= 0., axis=0))[0]
idx = idx[np.where(np.all(pqs[:, idx] <= 1., axis=0))[0]]
idx = idx[np.where(np.sum(pqs[:, idx], axis=0) < 1.)[0]]
dist = np.inf
if len(idx) > 0:
found = True
pt = idx[np.argmin(np.abs(dists[idx]))]
p, q = pqs[:, pt]
dist = dists[pt]
# re-reference back to original numbers
pt = pt_tris[pt]
if found is False or run_all is True:
# don't include ones that we might have found before
s = np.setdiff1d(np.arange(len(pt_tris)), idx) # ones to check sides
# Tough: must investigate the sides
pp, qq, ptt, distt = _nearest_tri_edge(pt_tris[s], to_pt, pqs[:, s],
dists[s], tri_geom)
if np.abs(distt) < np.abs(dist):
p, q, pt, dist = pp, qq, ptt, distt
return p, q, pt, dist
def _nearest_tri_edge(pt_tris, to_pt, pqs, dist, tri_geom):
"""Get nearest location from a point to the edge of a set of triangles"""
# We might do something intelligent here. However, for now
# it is ok to do it in the hard way
aa = tri_geom['a'][pt_tris]
bb = tri_geom['b'][pt_tris]
cc = tri_geom['c'][pt_tris]
pp = pqs[0]
qq = pqs[1]
# Find the nearest point from a triangle:
# Side 1 -> 2
p0 = np.minimum(np.maximum(pp + 0.5 * (qq * cc) / aa,
0.0), 1.0)
q0 = np.zeros_like(p0)
# Side 2 -> 3
t1 = (0.5 * ((2.0 * aa - cc) * (1.0 - pp) +
(2.0 * bb - cc) * qq) / (aa + bb - cc))
t1 = np.minimum(np.maximum(t1, 0.0), 1.0)
p1 = 1.0 - t1
q1 = t1
# Side 1 -> 3
q2 = np.minimum(np.maximum(qq + 0.5 * (pp * cc) / bb, 0.0), 1.0)
p2 = np.zeros_like(q2)
# figure out which one had the lowest distance
dist0 = _get_tri_dist(pp, qq, p0, q0, aa, bb, cc, dist)
dist1 = _get_tri_dist(pp, qq, p1, q1, aa, bb, cc, dist)
dist2 = _get_tri_dist(pp, qq, p2, q2, aa, bb, cc, dist)
pp = np.r_[p0, p1, p2]
qq = np.r_[q0, q1, q2]
dists = np.r_[dist0, dist1, dist2]
ii = np.argmin(np.abs(dists))
p, q, pt, dist = pp[ii], qq[ii], pt_tris[ii % len(pt_tris)], dists[ii]
return p, q, pt, dist
def mesh_edges(tris):
"""Returns sparse matrix with edges as an adjacency matrix
Parameters
----------
tris : array of shape [n_triangles x 3]
The triangles.
Returns
-------
edges : sparse matrix
The adjacency matrix.
"""
if np.max(tris) > len(np.unique(tris)):
raise ValueError('Cannot compute connectivity on a selection of '
'triangles.')
npoints = np.max(tris) + 1
ones_ntris = np.ones(3 * len(tris))
a, b, c = tris.T
x = np.concatenate((a, b, c))
y = np.concatenate((b, c, a))
edges = coo_matrix((ones_ntris, (x, y)), shape=(npoints, npoints))
edges = edges.tocsr()
edges = edges + edges.T
return edges
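# Illustrative sketch (comment only): for a single triangle each undirected
# edge appears in both directions; an edge shared by two triangles would get
# the value 2.
#
#     mesh_edges(np.array([[0, 1, 2]])).toarray()
#     # -> [[0., 1., 1.],
#     #     [1., 0., 1.],
#     #     [1., 1., 0.]]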
def mesh_dist(tris, vert):
"""Compute adjacency matrix weighted by distances
It generates an adjacency matrix where the entries are the distances
between neighboring vertices.
Parameters
----------
tris : array (n_tris x 3)
Mesh triangulation
vert : array (n_vert x 3)
Vertex locations
Returns
-------
dist_matrix : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices
"""
edges = mesh_edges(tris).tocoo()
# Euclidean distances between neighboring vertices
dist = np.sqrt(np.sum((vert[edges.row, :] - vert[edges.col, :]) ** 2,
axis=1))
dist_matrix = csr_matrix((dist, (edges.row, edges.col)), shape=edges.shape)
return dist_matrix
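# Illustrative sketch (comment only; the 3-4-5 triangle is an example
# assumption): the same adjacency pattern weighted by Euclidean edge lengths.
#
#     tris = np.array([[0, 1, 2]])
#     vert = np.array([[0., 0., 0.], [3., 0., 0.], [0., 4., 0.]])
#     mesh_dist(tris, vert).toarray()
#     # -> [[0., 3., 4.],
#     #     [3., 0., 5.],
#     #     [4., 5., 0.]]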
@verbose
def read_tri(fname_in, swap=False, verbose=None):
"""Function for reading triangle definitions from an ascii file.
Parameters
----------
fname_in : str
Path to surface ASCII file (ending with '.tri').
swap : bool
Assume the ASCII file vertex ordering is clockwise instead of
counterclockwise.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
rr : array, shape=(n_vertices, 3)
Coordinate points.
tris : int array, shape=(n_faces, 3)
Triangulation (each line contains indices for three points which
together form a face).
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
read_surface
write_surface
"""
with open(fname_in, "r") as fid:
lines = fid.readlines()
n_nodes = int(lines[0])
n_tris = int(lines[n_nodes + 1])
n_items = len(lines[1].split())
if n_items in [3, 6, 14, 17]:
inds = range(3)
elif n_items in [4, 7]:
inds = range(1, 4)
else:
raise IOError('Unrecognized format of data.')
rr = np.array([np.array([float(v) for v in l.split()])[inds]
for l in lines[1:n_nodes + 1]])
tris = np.array([np.array([int(v) for v in l.split()])[inds]
for l in lines[n_nodes + 2:n_nodes + 2 + n_tris]])
if swap:
tris[:, [2, 1]] = tris[:, [1, 2]]
tris -= 1
logger.info('Loaded surface from %s with %s nodes and %s triangles.' %
(fname_in, n_nodes, n_tris))
if n_items in [3, 4]:
logger.info('Node normals were not included in the source file.')
else:
warn('Node normals were not read.')
return (rr, tris)
|
bsd-3-clause
|
lioritan/Thesis
|
problems/parse_ents.py
|
1
|
1666
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 01 22:04:44 2016
@author: Lior
"""
import cPickle
import os
from matplotlib.cbook import flatten
with open('../yago2s_tsv/yago_relations.pkl', 'rb') as fptr:
relations= cPickle.load(fptr)
print 'yago loaded'
entities=set()
for path,dirnames, filenames in os.walk('./techtc_entities/'):
for filename in filenames:
fptr=open(path+'/'+filename, 'rb')
((trn_ents,_),(tst_ents,_))=cPickle.load(fptr)
entities.update(flatten(trn_ents))
entities.update(flatten(tst_ents))
fptr.close()
less_relations = {}
target_entities = set()
print len(entities)
for i,ent in enumerate(entities):
#print i
for rel in relations.keys():
if relations[rel].has_key(ent):
val = relations[rel][ent]
if rel!='YAGO:types' and rel!='types':
target_entities.add(val)
if not less_relations.has_key(rel):
less_relations[rel]={ent:val}
else:
less_relations[rel][ent]=val
#with open('tmp_rels.pkl','wb') as fptr:
# cPickle.dump((less_relations,target_entities), fptr, -1)
del entities
print len(target_entities)
for i,ent in enumerate(target_entities):
#print i
for rel in relations.keys():
if relations[rel].has_key(ent):
val = relations[rel][ent]
if not less_relations.has_key(rel):
less_relations[rel]={ent:val}
else:
less_relations[rel][ent]=val
del target_entities
with open('yago_rels_new.pkl','wb') as fptr:
cPickle.dump(less_relations, fptr, -1)
|
gpl-2.0
|
huoyao/epcc-openacc-benchmarks
|
plot.py
|
4
|
5869
|
# Copyright (c) 2013 The University of Edinburgh.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# Plotter for OpenACC Benchmarks benchmarks
import numpy as np
import matplotlib.pyplot as plt
import sys
import argparse
class Globs:
"""Class for storing objects such as the session dictionary and a counter."""
def __init__(self):
self.colour = False
self.inputfile = 'data.txt'
self.debug = False
self.eps = False
def chomp(s):
return s[:-1] if s.endswith('\n') else s
def setAxLinesBW(ax):
"""
Take each Line2D in the axes, ax, and convert the line style to be
suitable for black and white viewing.
"""
MARKERSIZE = 3
COLORMAP = {
'b': {'marker': None, 'dash': (None,None)},
'g': {'marker': None, 'dash': [5,5]},
'r': {'marker': None, 'dash': [5,3,1,3]},
'c': {'marker': None, 'dash': [1,3]},
'm': {'marker': None, 'dash': [5,2,5,2,5,10]},
'y': {'marker': None, 'dash': [5,3,1,2,1,10]},
'k': {'marker': 'o', 'dash': (None,None)} #[1,2,1,10]}
}
for line in ax.get_lines():
origColor = line.get_color()
line.set_color('black')
line.set_dashes(COLORMAP[origColor]['dash'])
line.set_marker(COLORMAP[origColor]['marker'])
line.set_markersize(MARKERSIZE)
def setFigLinesBW(fig):
"""
Take each axes in the figure, and for each line in the axes, make the
line viewable in black and white.
"""
for ax in fig.get_axes():
setAxLinesBW(ax)
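# Illustrative sketch (comment only; the toy figure is an example assumption):
# convert a colour figure to the black-and-white styles in COLORMAP above.
#
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     ax.plot([0, 1], [0, 1], 'b')
#     ax.plot([0, 1], [1, 0], 'r')
#     setFigLinesBW(fig)
#     fig.savefig('example_bw.jpg')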
def main():
# Make a singular global instance for holding odd global values
global GL
GL = Globs()
    # Parse the input arguments in a nice manner
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="Enable debug outputs.", action="store_true")
parser.add_argument("-i", "--input", help="Input file to read (default: ./data.txt).")
parser.add_argument("-c", "--colour", help="Colour graphs. (default: Black + White).", action="store_true")
parser.add_argument("-e", "--eps", help="Output graphs in Enhanced Postscript Format (.eps) rather than JPEG.", action="store_true")
args = parser.parse_args()
if args.debug:
GL.debug = True
if args.colour:
GL.colour = True
if args.input:
GL.inputfile = args.input
if args.eps:
GL.eps = True
# Open input file, consume file, close.
inp = open(GL.inputfile, 'r')
data_array = []
for l in inp:
data_array.append(chomp(l).split())
inp.close()
names = []
compilers = []
datasizes = []
for line in data_array:
names.append(line[1])
compilers.append(line[0])
datasizes.append(int(line[2]))
# Unique-ify these lists
names = list(set(names))
compilers = list(set(compilers))
datasizes = list(set(datasizes))
ds = np.zeros( (len(datasizes),1) )
datasizes = sorted(datasizes)
for i in range(0,len(datasizes)):
ds[i][0] = int(datasizes[i])
# Loop over the tests
# Create and array of times for each one and add data as necessary
# Then plot for each test
if GL.debug:
print names
print datasizes
print compilers
for i in names:
times = np.zeros( (len(datasizes),len(compilers)) )
for c in compilers:
for r in range(0, len(data_array)):
if (data_array[r][0] == c and data_array[r][1] == i):
times[datasizes.index(int(data_array[r][2]))][compilers.index(c)] = abs(float(data_array[r][3]))
ds2 = np.log(ds)/np.log(2)
unit = r'$\mu s$'
if np.amax(times) > 1000:
times = times / 1000
unit = r'$ms$'
if np.amax(times) > 1000000:
times = times / 1000000
unit = r'$s$'
fig = plt.figure()
plt.semilogy(ds2,times,'-', linewidth=3)
plt.xlabel('Datasize (M Bytes)', size='large')
if i=='Kernels_combined' or i=='Kernels_If' or i=='Parallel_If' or i=='Parallel_private' or i=='Parallel_firstprivate' or i=='Parallel_reduction' or i=='Kernels_reduction' or i=='Update_Host' or i=='Kernels_Invocation':
plt.ylabel('Difference (' + unit + ')', size='large')
else:
plt.ylabel('Run time (' + unit + ')', size='large')
locs, labs = plt.xticks()
lmax = max(locs)+1
lmin = min(locs)
plt.xticks(np.arange(lmin,lmax), ('1','2','4','8','16','32','64','128','256','512','1024'),size='large')
xmin,xmax = plt.xlim()
plt.xlim(xmin*0.99,xmax*1.01)
plt.yticks(size='large')
i = i.replace('_',' ')
plt.title(i, size='large')
if GL.colour == False:
setFigLinesBW(fig)
comp_normalized = []
for c in compilers:
c = c.replace('_', ' ')
comp_normalized.append(c)
plt.legend(comp_normalized, loc='best')
if GL.eps == True:
plt.savefig(i+'.eps', dpi=660, bbox_inches='tight')
else:
plt.savefig(i+'.jpg', bbox_inches='tight')
plt.close()
if __name__ == "__main__":
sys.exit(main())
|
apache-2.0
|
spnow/grr
|
gui/plugins/flow_management.py
|
1
|
29906
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""GUI elements allowing launching and management of flows."""
import os
import StringIO
import urllib
import matplotlib.pyplot as plt
from grr.gui import renderers
from grr.gui.plugins import crash_view
from grr.gui.plugins import fileview
from grr.gui.plugins import forms
from grr.gui.plugins import semantic
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import utils
class LaunchFlows(renderers.Splitter):
"""Launches a new flow."""
description = "Start new flows"
behaviours = frozenset(["Host"])
order = 10
left_renderer = "FlowTree"
top_right_renderer = "SemanticProtoFlowForm"
bottom_right_renderer = "FlowManagementTabs"
class FlowTree(renderers.TreeRenderer):
"""Show all flows in a tree.
Generated Javascript Events:
- flow_select(flow_path) - The full path for the flow name (category +
name).
"""
publish_select_queue = "flow_select"
# Only show flows in the tree that specify all of these behaviours in their
# behaviours attribute.
flow_behaviors_to_render = flow.FlowBehaviour("Client Flow")
def EnumerateCategories(self, path, request, flow_behaviors_to_render):
"""Search through all flows for categories starting with path."""
categories = set()
flows = set()
# Use an object for path manipulations.
path = rdfvalue.RDFURN(path)
for name, cls in flow.GRRFlow.classes.items():
# Flows without a category do not show up in the GUI.
if not getattr(cls, "category", None):
continue
# If a flow is tagged as AUTHORIZED_LABELS, the user must have the correct
# label to see it.
if cls.AUTHORIZED_LABELS:
if not data_store.DB.security_manager.CheckUserLabels(
request.token.username, cls.AUTHORIZED_LABELS, token=request.token):
continue
# Skip if there are behaviours that are not supported by the class.
if not flow_behaviors_to_render.IsSupported(cls.behaviours):
continue
category = rdfvalue.RDFURN(cls.category)
if category == path:
flows.add((name, cls.friendly_name))
else:
relative_path = category.RelativeName(path)
# This category starts with this path
if relative_path is not None:
categories.add(relative_path.split("/")[0])
return categories, flows
def RenderBranch(self, path, request):
"""Renders tree leafs for flows."""
# Retrieve the user's GUI mode preferences.
self.user = request.user
try:
user_record = aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(self.user), "GRRUser",
token=request.token)
user_preferences = user_record.Get(user_record.Schema.GUI_SETTINGS)
except IOError:
user_preferences = aff4.GRRUser.SchemaCls.GUI_SETTINGS()
flow_behaviors_to_render = (self.flow_behaviors_to_render +
user_preferences.mode)
categories, flows = self.EnumerateCategories(path, request,
flow_behaviors_to_render)
for category in sorted(categories):
self.AddElement(category)
for name, friendly_name in sorted(flows):
self.AddElement(name, behaviour="leaf", friendly_name=friendly_name)
class FlowManagementTabs(renderers.TabLayout):
"""Show information about the flows.
Listening Javascript Events:
- flow_select(flow_path) - A selection event on the tree informing us of the
flow path. The basename of flow_path is the name of the flow.
Internal State:
- flow_path - The category and name of the flow we display.
"""
names = ["Flow Information", "Current Running Flows"]
delegated_renderers = ["FlowInformation", "ListFlowsTable"]
tab_hash = "ft"
layout_template = renderers.TabLayout.layout_template + """
<script>
grr.subscribe('flow_select', function (path) {
$("#{{unique|escapejs}}").data().state.flow_path = path;
$("#{{unique|escapejs}} li.active a").click();
}, "{{unique|escapejs}}");
</script>"""
def Layout(self, request, response):
self.state = dict(flow_path=request.REQ.get("flow_path"),
client_id=request.REQ.get("client_id"))
return super(FlowManagementTabs, self).Layout(request, response)
class FlowInformation(renderers.TemplateRenderer):
"""Displays information about the flow.
Post Parameters:
- flow_path: The category + flow name for use to display.
"""
layout_template = renderers.Template("""
<h3>{{ this.flow_name|escape }}</h3>
<p>{{ this.flow_doc|linebreaks }}</p>
<pre>
Prototype: {{ this.prototype|escape }}
{{ this.prototype_doc|escape }}
</pre>
<table class="table table-condensed table-bordered full-width fixed-columns">
<colgroup>
<col style="width: 20%" />
<col style="width: 60%" />
<col style="width: 20%" />
</colgroup>
<thead>
<tr>
<th class="ui-state-default">State</th>
<th class="ui-state-default">Description</th>
<th class="ui-state-default">Next States</th></tr>
</thead>
<tbody>
{% for state, doc, next in this.states %}
<tr><td class='state'>{{ state|escape }}</td>
<td class='description'>{{ doc|escape }}</td>
<td class='text'>{{ next|escape }}</td></tr>
{% endfor %}
</tbody>
</table>
""")
# This is prepended to flow args to eliminate clashes with other parameters.
arg_prefix = "v_"
def Layout(self, request, response):
"""Update the progress bar based on the progress reported."""
self.flow_name = request.REQ.get("flow_path", "").split("/")[-1]
try:
flow_class = flow.GRRFlow.classes[self.flow_name]
if not aff4.issubclass(flow_class, flow.GRRFlow):
return response
except KeyError:
return response
self.states = []
# Fill in information about each state
for state_method in flow_class.__dict__.values():
try:
next_states = state_method.next_states
# Only show the first line of the doc string.
try:
func_doc = state_method.func_doc.split("\n")[0].strip()
except AttributeError:
func_doc = ""
self.states.append((state_method.func_name,
func_doc, ", ".join(next_states)))
except AttributeError:
pass
# Now fill in information about each arg to this flow.
prototypes = []
for type_descriptor in flow_class.args_type.type_infos:
if not type_descriptor.hidden:
prototypes.append("%s" % (type_descriptor.name))
self.prototype = "%s(%s)" % (flow_class.__name__, ", ".join(prototypes))
self.flow_doc = flow_class.__doc__
return super(FlowInformation, self).Layout(request, response)
class SemanticProtoFlowForm(renderers.TemplateRenderer):
"""Render a flow based on its semantic information."""
layout_template = renderers.Template("""
<div class="FormBody" id="{{unique|escape}}">
{% if this.flow_found %}
<form id='form_{{unique|escape}}' class="form-horizontal FormData"
data-flow_path='{{this.flow_name|escape}}'
data-dom_node='{{id|escape}}'
>
{{this.form|safe}}
<hr/>
{{this.runner_form|safe}}
<div class="control-group">
<div class="controls">
<button id='submit_{{unique|escape}}' class="btn btn-success Launch" >
Launch
</button>
</div>
</div>
</form>
{% else %}
Please Select a flow to launch from the tree on the left.
{% endif %}
</div>
<div id="contents_{{unique}}"></div>
<script>
$("#submit_{{unique|escapejs}}").click(function () {
var state = {};
$.extend(state, $('#form_{{unique|escapejs}}').data(), grr.state);
grr.update('{{renderer}}', 'contents_{{unique|escapejs}}',
state);
return false;
});
grr.subscribe('flow_select', function(path) {
grr.layout("{{renderer|escapejs}}", "{{id|escapejs}}", {
flow_path: path,
client_id: grr.state.client_id,
reason: grr.state.reason
});
}, '{{unique|escapejs}}');
</script>
""") + renderers.TemplateRenderer.help_template
ajax_template = renderers.Template("""
<pre>
{{this.args}}
</pre>
<pre>
{{this.runner_args}}
</pre>
Launched Flow {{this.flow_name}}.
<script>
$("#{{this.dom_node|escapejs}} .FormBody").html("");
grr.subscribe('flow_select', function(path) {
grr.layout("{{renderer|escapejs}}", "{{id|escapejs}}", {
flow_path: path,
client_id: grr.state.client_id,
reason: grr.state.reason
});
}, '{{unique|escapejs}}');
</script>
""")
ajax_error_template = renderers.Template("""
<script>
grr.publish("grr_messages", "{{error|escapejs}}");
grr.publish("grr_traceback", "{{error|escapejs}}");
</script>
""")
context_help_url = "user_manual.html#_flows"
def Layout(self, request, response):
"""Render the form for creating the flow args."""
self.flow_name = os.path.basename(request.REQ.get("flow_path", ""))
self.flow_cls = flow.GRRFlow.classes.get(self.flow_name)
if aff4.issubclass(self.flow_cls, flow.GRRFlow):
self.flow_found = True
self.form = forms.SemanticProtoFormRenderer(
self.flow_cls.GetDefaultArgs(token=request.token),
prefix="args").RawHTML(request)
self.runner_form = forms.SemanticProtoFormRenderer(
flow.FlowRunnerArgs(flow_name=self.flow_name),
prefix="runner").RawHTML(request)
return super(SemanticProtoFlowForm, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Parse the flow args from the form and launch the flow."""
self.flow_name = request.REQ.get("flow_path", "").split("/")[-1]
self.client_id = request.REQ.get("client_id", None)
self.dom_node = request.REQ.get("dom_node")
flow_cls = flow.GRRFlow.classes.get(self.flow_name)
if flow_cls is not None:
self.args = forms.SemanticProtoFormRenderer(
flow_cls.args_type(), prefix="args").ParseArgs(request)
try:
self.args.Validate()
except ValueError as e:
return self.RenderFromTemplate(self.ajax_error_template,
response, error=e)
self.runner_args = forms.SemanticProtoFormRenderer(
flow.FlowRunnerArgs(), prefix="runner_").ParseArgs(request)
self.runner_args.Validate()
self.flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
flow_name=self.flow_name,
token=request.token,
args=self.args,
runner_args=self.runner_args)
return renderers.TemplateRenderer.Layout(
self, request, response, apply_template=self.ajax_template)
class FlowFormCancelAction(renderers.TemplateRenderer):
"""Handle submission of a Cancel Flow button press.
Post Parameters:
- flow_id: The flow to cancel.
"""
layout_template = renderers.Template("")
def Layout(self, request, response):
# We can't terminate the flow directly through flow.GRRFlow.TerminateFlow as
# that requires writing to the datastore, which we're not allowed to do from
# the GUI. Therefore we use the dedicated TerminateFlow flow.
flow.GRRFlow.StartFlow(
flow_name="TerminateFlow",
flow_urn=rdfvalue.RDFURN(request.REQ.get("flow_id")),
reason="Cancelled in GUI", token=request.token)
super(FlowFormCancelAction, self).Layout(request, response)
class FlowStateIcon(semantic.RDFValueRenderer):
"""Render the flow state by using an icon."""
layout_template = renderers.Template("""
<div class="centered">
<img class='grr-icon grr-flow-icon'
src='/static/images/{{this.icon|escape}}'
title='{{this.title|escape}}'
/>
</div>""")
# Maps the flow states to icons we can show
state_map = {"TERMINATED": ("stock_yes.png", "Flow finished normally."),
"RUNNING": ("clock.png", "Flow is still running."),
"ERROR": ("nuke.png", "Flow terminated with an error."),
"CLIENT_CRASHED": (
"skull-icon.png",
"The client crashed while executing this flow.")}
icon = "question-red.png"
def Layout(self, request, response):
try:
self.icon, self.title = self.state_map[str(self.proxy)]
except (KeyError, ValueError):
pass
super(FlowStateIcon, self).Layout(request, response)
class ManageFlows(renderers.Splitter2Way):
"""View launched flows in a tree."""
description = "Manage launched flows"
behaviours = frozenset(["Host"])
order = 20
top_renderer = "ListFlowsTable"
bottom_renderer = "FlowTabView"
class FlowTabView(renderers.TabLayout):
"""Show various flow information in a Tab view.
Listening Javascript Events:
- flow_table_select(flow_aff4_path) - A selection event on the tree
informing us of the flow aff4 path. The basename of flow_path is the name
of the flow.
Internal State:
- flow_path - The category and name of the flow we display.
"""
names = ["Flow Information", "Requests"]
delegated_renderers = ["ShowFlowInformation", "FlowRequestView"]
tab_hash = "ftv"
layout_template = renderers.TabLayout.layout_template + """
<script>
grr.subscribe('flow_table_select', function (path) {
grr.layout("{{renderer|escapejs}}", "{{id|escapejs}}",
{flow: path, client_id: grr.state.client_id});
}, "tab_contents_{{unique|escapejs}}");
</script>"""
def Layout(self, request, response):
req_flow = request.REQ.get("flow")
if req_flow:
self.state["flow"] = req_flow
client_id = request.REQ.get("client_id")
if client_id:
self.state["client_id"] = client_id
return super(FlowTabView, self).Layout(request, response)
class FlowRequestView(renderers.TableRenderer):
"""View outstanding requests for a flow.
Post Parameters:
- client_id: The client to show the flows for.
- flow: The flow to show.
"""
post_parameters = ["flow", "client_id"]
def __init__(self, **kwargs):
super(FlowRequestView, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("ID"))
self.AddColumn(semantic.RDFValueColumn("Request", width="100%"))
self.AddColumn(semantic.RDFValueColumn("Last Response", width="100%"))
def BuildTable(self, start_row, end_row, request):
session_id = request.REQ.get("flow", "")
if not session_id:
return
manager = queue_manager.QueueManager(token=request.token)
for i, (request, responses) in enumerate(
manager.FetchRequestsAndResponses(
rdfvalue.RDFURN(session_id))):
if request.id == 0:
continue
if i < start_row:
continue
if i > end_row:
break
# Tie up the request to each response to make it easier to render.
self.AddCell(i, "ID",
manager.FLOW_REQUEST_TEMPLATE % request.id)
self.AddCell(i, "Request", request)
if responses:
self.AddCell(i, "Last Response", responses[-1])
class TreeColumn(semantic.RDFValueColumn, renderers.TemplateRenderer):
"""A specialized column which adds tree controls."""
template = renderers.Template("""
{% if this.branch %}
<span depth='{{this.depth|escape}}'
onclick='grr.table.toggleChildRows(this, "{{this.value|escapejs}}");'
style='margin-left: {{this.depth|escape}}em;'
class='tree_closed tree_branch'/>
{% else %}
<span depth='{{this.depth|escape}}' class='tree_leaf'
style='margin-left: {{this.depth|escape}}em;' />
{% endif %}
""")
def AddElement(self, index, element, depth, row_type):
self.rows[index] = (element, depth, row_type == "branch")
def RenderRow(self, index, request, row_options):
"""Renders the cell with additional tree controls."""
self.value, self.depth, self.branch = self.rows.get(index, ("", 0, "leaf"))
self.index = index
row_options["row_id"] = index
renderer = self.renderer
if renderer is None:
# What is the RDFValueRenderer for this attribute?
renderer = semantic.RDFValueRenderer.RendererForRDFValue(
self.value.__class__.__name__)
# Instantiate the renderer and return the HTML
if renderer:
result = renderer(self.value).RawHTML(request)
else:
result = utils.SmartStr(self.value)
return self.FormatFromTemplate(self.template, value=result,
index=index, this=self)
class FlowColumn(TreeColumn):
"""A specialized tree/column for sessions."""
template = """
<div id='cancel_{{this.index|escape}}' flow_id="{{this.value|escape}}"
style='float: left'>
</div>""" + TreeColumn.template + """
{{this.row_name|safe}}
"""
def __init__(self, *args, **kwargs):
super(FlowColumn, self).__init__(*args, **kwargs)
self.rows_names = {}
def AddElement(self, index, element, depth, row_type, row_name):
self.rows_names[index] = row_name
super(FlowColumn, self).AddElement(index, element, depth, row_type)
def RenderRow(self, index, request, row_options):
self.row_name = self.rows_names.get(index, "")
return super(FlowColumn, self).RenderRow(index, request, row_options)
class ListFlowsTable(renderers.TableRenderer):
"""List all flows for a client in a table.
Generated Javascript Events:
- flow_table_select(flow): The flow id that the user has selected.
Post Parameters:
- client_id: The client to show the flows for.
"""
selection_publish_queue = "flow_table_select"
with_toolbar = True
layout_template = """
{% if this.with_toolbar %}
<div id="toolbar_{{unique|escape}}" class="breadcrumb">
<li>
<button id="cancel_flow_{{unique|escape}}" title="Cancel Selected Flows"
class="btn" name="cancel_flow">
<img src="/static/images/editdelete.png" class="toolbar_icon">
</button>
</li>
</div>
{% endif %}
""" + renderers.TableRenderer.layout_template + """
<script>
$("#cancel_flow_{{unique|escapejs}}").click(function () {
/* Find all selected rows and cancel them. */
$("#table_{{id|escape}}")
.find("tr.row_selected div[flow_id]")
.each(function () {
var flow_id = $(this).attr('flow_id');
var id = $(this).attr('id');
/* Cancel the flow, and then reset the icon. */
grr.layout("FlowFormCancelAction", id,
{flow_id: flow_id}, function () {
$('#table_{{id|escapejs}}').trigger('refresh');
});
});
});
//Receive the selection event and emit a session_id
grr.subscribe("select_table_{{ id|escapejs }}", function(node) {
if (node) {
flow = node.find("div[flow_id]").attr('flow_id');
if (flow) {
grr.publish("{{ this.selection_publish_queue|escapejs }}",
flow);
};
};
}, '{{ unique|escapejs }}');
/* Update the flow view from the hash. */
if(grr.hash.flow) {
// NOTE(mbushkov): delay is needed for cases when flow list and flow
// information are rendered as parts of the same renderer. In that
// case the ShowFlowInformation renderer won't be able to react on the
// click because it subscribes for the flow_table_select event after
// the code below is executed.
window.setTimeout(function () {
$('div[flow_id="' + grr.hash.flow +'"]').parents('tr').click();
}, 1);
}
</script>
"""
def _GetCreationTime(self, obj):
try:
return obj.state.context.get("create_time")
except AttributeError:
return obj.Get(obj.Schema.LAST, 0)
def __init__(self, **kwargs):
super(ListFlowsTable, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"State", renderer=FlowStateIcon, width="40px"))
self.AddColumn(FlowColumn("Path", renderer=semantic.SubjectRenderer,
width="20%"))
self.AddColumn(semantic.RDFValueColumn("Flow Name", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Creation Time", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Last Active", width="20%"))
self.AddColumn(semantic.RDFValueColumn("Creator", width="20%"))
def BuildTable(self, start_row, end_row, request):
"""Renders the table."""
depth = request.REQ.get("depth", 0)
flow_urn = self.state.get("value", request.REQ.get("value"))
if flow_urn is None:
client_id = request.REQ.get("client_id")
if not client_id: return
flow_urn = rdfvalue.RDFURN(client_id).Add("flows")
flow_root = aff4.FACTORY.Open(flow_urn, mode="r", token=request.token)
root_children = list(flow_root.OpenChildren())
root_children = sorted(root_children,
key=self._GetCreationTime,
reverse=True)
self.size = len(root_children)
if not depth:
root_children = root_children[start_row:end_row]
level2_children = dict(aff4.FACTORY.MultiListChildren(
[f.urn for f in root_children], token=request.token))
row_index = start_row
for flow_obj in root_children:
if level2_children.get(flow_obj.urn, None):
row_type = "branch"
else:
row_type = "leaf"
row = {}
last = flow_obj.Get(flow_obj.Schema.LAST)
if last:
row["Last Active"] = last
if isinstance(flow_obj, aff4.AFF4Object.GRRFlow):
row_name = flow_obj.urn.Basename()
try:
if flow_obj.Get(flow_obj.Schema.CLIENT_CRASH):
row["State"] = "CLIENT_CRASHED"
else:
row["State"] = flow_obj.state.context.state
row["Flow Name"] = flow_obj.state.context.args.flow_name
row["Creation Time"] = flow_obj.state.context.create_time
row["Creator"] = flow_obj.state.context.creator
except AttributeError:
row["Flow Name"] = "Failed to open flow."
else:
# We're dealing with a hunt here.
row_name = flow_obj.urn.Dirname()
row["Flow Name"] = "Hunt"
self.columns[1].AddElement(row_index, flow_obj.urn, depth, row_type,
row_name)
self.AddRow(row, row_index)
row_index += 1
# The last row we wrote.
return row_index
class ShowFlowInformation(fileview.AFF4Stats):
"""Display information about the flow.
Post Parameters:
- flow: The flow id we will display.
Internal State:
- client_id, flow
"""
selection_publish_queue = "flow_table_select"
historical_renderer = "HistoricalFlowView"
# Embed the regular AFF4Stats inside a container to allow scrolling
layout_template = renderers.Template("""
<div id="container_{{unique|escapejs}}" class="FormBody">
{% if this.path %}
""" + str(fileview.AFF4Stats.layout_template) + """
<br/>
{% else %}
Please select a flow to manage from the above table.
{% endif %}
</div>
""")
def Layout(self, request, response):
"""Introspect the Schema for flow objects."""
try:
self.state["flow"] = session_id = request.REQ["flow"]
self.fd = aff4.FACTORY.Open(session_id, token=request.token,
age=aff4.ALL_TIMES)
self.classes = self.RenderAFF4Attributes(self.fd, request)
self.path = self.fd.urn
except (KeyError, IOError):
self.path = None
# Skip our parent's Layout method.
return super(fileview.AFF4Stats, self).Layout(request, response)
class HistoricalFlowView(fileview.HistoricalView):
"""View historical attributes for the flow."""
def Layout(self, request, response):
self.state = dict(flow=request.REQ.get("flow"),
attribute=request.REQ.get("attribute"))
self.AddColumn(semantic.RDFValueColumn(self.state["attribute"]))
return renderers.TableRenderer.Layout(self, request, response)
def BuildTable(self, start_row, end_row, request):
"""Populate the table with attribute values."""
flow_name = request.REQ.get("flow")
attribute_name = request.REQ.get("attribute")
if attribute_name is None:
return
self.AddColumn(semantic.RDFValueColumn(attribute_name))
fd = aff4.FACTORY.Open(flow_name, token=request.token, age=aff4.ALL_TIMES)
self.BuildTableFromAttribute(attribute_name, fd, start_row, end_row)
class FlowPBRenderer(semantic.RDFProtoRenderer):
"""Format the FlowPB protobuf."""
classname = "Flow"
name = "Flow Protobuf"
backtrace_template = renderers.Template("""
<div id='hidden_pre_{{name|escape}}'>
<ins class='fg-button ui-icon ui-icon-minus'/>
{{error_msg|escape}}
<div class='contents'>
<pre>{{value|escape}}</pre>
</div>
</div>
<script>
$('#hidden_pre_{{name|escape}}').click(function () {
$(this).find('ins').toggleClass('ui-icon-plus ui-icon-minus');
$(this).find('.contents').toggle();
}).click();
</script>
""")
def RenderBacktrace(self, descriptor, value):
error_msg = value.rstrip().split("\n")[-1]
return self.FormatFromTemplate(self.backtrace_template, value=value,
name=descriptor.name, error_msg=error_msg)
# Pretty print these special fields.
translator = dict(
backtrace=RenderBacktrace,
pickle=semantic.RDFProtoRenderer.Ignore,
children=semantic.RDFProtoRenderer.Ignore,
network_bytes_sent=semantic.RDFProtoRenderer.HumanReadableBytes)
class FlowNotificationRenderer(semantic.RDFValueRenderer):
"""Renders notifications inside the FlowRenderer."""
classname = "Notification"
# Note that following the href (e.g. right click, open in new tab) gives a fresh
# URL, but clicking it maintains the state of the other tabs.
layout_template = renderers.Template("""
{% if this.proxy.type == "ViewObject" %}
<a id="{{unique}}" href="/#{{this.BuildHash|escape}}"
target_hash="{{this.BuildHash|escape}}">
{{this.proxy.subject|escape}}</a>
{% endif %}
{{this.proxy.message|escape}}
<script>
$("#{{unique|escape}}").click(function(){
grr.loadFromHash($(this).attr("target_hash"));
});
</script>
""")
def BuildHash(self):
"""Build hash string to navigate to the appropriate location."""
h = {}
path = rdfvalue.RDFURN(self.proxy.subject)
components = path.Path().split("/")[1:]
h["c"] = components[0]
h["path"] = "/".join(components[1:])
h["t"] = renderers.DeriveIDFromPath("/".join(components[1:-1]))
h["main"] = "VirtualFileSystemView"
return urllib.urlencode(
sorted([(x, utils.SmartStr(y)) for x, y in h.items()]))
class ClientCrashesRenderer(crash_view.ClientCrashCollectionRenderer):
"""View launched flows in a tree."""
description = "Crashes"
behaviours = frozenset(["HostAdvanced"])
order = 50
def Layout(self, request, response):
client_id = request.REQ.get("client_id")
self.crashes_urn = aff4.ROOT_URN.Add(client_id).Add("crashes")
super(ClientCrashesRenderer, self).Layout(request, response)
class ProgressGraphRenderer(renderers.ImageDownloadRenderer):
def Content(self, request, _):
"""Generates the actual image to display."""
flow_id = request.REQ.get("flow_id")
flow_obj = aff4.FACTORY.Open(flow_id, age=aff4.ALL_TIMES)
log = list(flow_obj.GetValuesForAttribute(flow_obj.Schema.LOG))
create_time = flow_obj.state.context.create_time / 1000000
plot_data = [(int(x.age) / 1000000, int(str(x).split(" ")[1]))
for x in log if "bytes" in str(x)]
plot_data.append((create_time, 0))
plot_data = sorted([(x - create_time, y) for (x, y) in plot_data])
x = [a for (a, b) in plot_data]
y = [b for (a, b) in plot_data]
params = {"backend": "png"}
plt.rcParams.update(params)
plt.figure(1)
plt.clf()
plt.plot(x, y)
plt.title("Progress for flow %s" % flow_id)
plt.xlabel("Time (s)")
plt.ylabel("Bytes downloaded")
plt.grid(True)
buf = StringIO.StringIO()
plt.savefig(buf)
buf.seek(0)
return buf.read()
class GlobalLaunchFlows(renderers.Splitter):
"""Launches flows that apply across clients."""
description = "Start Global Flows"
behaviours = frozenset(["General"])
order = 10
left_renderer = "GlobalFlowTree"
top_right_renderer = "SemanticProtoFlowForm"
bottom_right_renderer = "FlowManagementTabs"
class GlobalFlowTree(FlowTree):
"""Show flows that work across clients."""
publish_select_queue = "flow_select"
flow_behaviors_to_render = flow.FlowBehaviour("Global Flow")
class GlobExpressionFormRenderer(forms.StringTypeFormRenderer):
"""A renderer for glob expressions with autocomplete."""
type = rdfvalue.GlobExpression
layout_template = ("""<div class="control-group">
""" + forms.TypeDescriptorFormRenderer.default_description_view + """
<div class="controls">
<input id='{{this.prefix}}'
type=text
{% if this.default %}
value='{{ this.default|escape }}'
{% endif %}
onchange="grr.forms.inputOnChange(this)"
class="unset input-xxlarge"/>
</div>
</div>
<script>
grr.glob_completer.Completer("{{this.prefix}}", {{this.completions|safe}});
</script>
""")
def AddProtoFields(self, name, attribute_type):
for type_info in attribute_type.type_infos:
self.completions.append("%s.%s" % (name, type_info.name))
def _HandleType(self, name, attribute_type):
# Skip these types.
if attribute_type in (rdfvalue.Dict,):
return
# An RDFValueArray contains a specific type.
elif issubclass(attribute_type, rdfvalue.RDFValueArray):
self._HandleType(name, attribute_type.rdf_type)
# Semantic Protobufs just contain their own fields.
elif issubclass(attribute_type, rdfvalue.RDFProtoStruct):
self.AddProtoFields(name, attribute_type)
else:
self.completions.append(name)
def Layout(self, request, response):
self.completions = []
for attribute in aff4.AFF4Object.VFSGRRClient.SchemaCls.ListAttributes():
if attribute.name:
self._HandleType(attribute.name, attribute.attribute_type)
return super(GlobExpressionFormRenderer, self).Layout(request, response)
|
apache-2.0
|
bjodah/chemreac
|
examples/equilibrium.py
|
2
|
10358
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Equilibrium
-----------
:download:`examples/equilibrium.py` demonstrates how
scaling can be used together with tolerances to achieve
desired accuracy from the numerical integration.
We will consider the transient towards an equilibrium
for a dimerization:
.. math ::
A + &B &\\overset{k_f}{\\underset{k_b}{\\rightleftharpoons}} C
The analytic solution is (its derivation is left as an exercise):
.. math::
A(t) &= A_0 - x(t) \\\\
B(t) &= B_0 - x(t) \\\\
C(t) &= C_0 + x(t) \\\\
x(t) &= \\frac{(U-b)(U+b)(e^{Ut}-1)}{2k_f(Ue^{Ut} + U - qe^{Ut} + q)} \\\\
where
.. math::
U &= \\sqrt{A^2k_f^2 + 2ABk_f^2 - 2Ak_bk_f + B^2k_f^2 -
2Bk_bk_f + 4Ck_bk_f + k_b^2} \\\\
q &= Ak_f + Bk_f - k_b
::
$ python equilibrium.py --help
.. exec::
echo "::\\\\n\\\\n"
python examples/examples/equilibrium.py --help | sed "s/^/ /"
Here is an example generated by:
::
$ python equilibrium.py --plot --savefig equilibrium.png
.. image:: ../_generated/equilibrium.png
If concentrations are far from 1 (and below abstol) the accuracy of
the numerical solution will be very poor:
::
$ python equilibrium.py --A0 1.0 --B0 1e-10 --C0 1e-30 --kf 10 --kb 1 --t0 0\
--tend 5 --plot --plotlogy --plotlogt --savefig equilibrium_unscaled.png
.. image:: ../_generated/equilibrium_unscaled.png
But by scaling the concentrations so that the smallest is well above the
absolute tolerance we can get accurate results:
::
$ python equilibrium.py --scaling 1e10 --A0 1.0 --B0 1e-10 --C0 1e-30 --kf 10\
--kb 1 --t0 0 --tend 5 --plot --plotlogy --plotlogt --savefig\
equilibrium_scaled.png
.. image:: ../_generated/equilibrium_scaled.png
"""
from __future__ import absolute_import, division, print_function
import argh
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.integrate import Integration
from chemreac.units import (
SI_base_registry, second, molar, to_unitless, get_derived_unit
)
from chemreac.util.plotting import (
save_and_or_show_plot, plot_solver_linear_error,
plot_solver_linear_excess_error
)
def _algebraic_sigmoid(x, power, limit):
# Avoid overflow in exp()
return x/((x/limit)**power+1)**(-1/power)
def analytic_x(t, A, B, C, kf, kb, _exp=np.exp):
"""
Analytic solution to the dimerization reaction:
A + B <=> C; (K = kf/kb)
"""
q = -A*kf - B*kf - kb
U = (A**2*kf**2 - 2*A*B*kf**2 + 2*A*kb*kf + B**2*kf**2 +
2*B*kb*kf + 4*C*kb*kf + kb**2)**0.5
expUt = _exp(U*t)
return -(U - q)*(U + q)*(1 - 1/expUt)/(2*kf*(U + U/expUt - q + q/expUt))
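# Editor's note: the sketch below is an added illustration, not part of the
# original example, and is never called. Assuming only the numpy import and
# analytic_x defined above, it checks two properties of the analytic solution:
# no conversion at t=0, and detailed balance (kf*A*B == kb*C) once equilibrium
# is effectively reached.
def _sanity_check_analytic_x():
    A0, B0, C0, kf, kb = 4.2, 3.1, 1.4, 0.9, 0.23
    t = np.array([0.0, 50.0])  # t=0 and a time long after equilibration
    x = analytic_x(t, A0, B0, C0, kf, kb)
    assert abs(x[0]) < 1e-12                    # x(0) = 0, no conversion yet
    A_eq, B_eq, C_eq = A0 - x[1], B0 - x[1], C0 + x[1]
    assert abs(kf*A_eq*B_eq - kb*C_eq) < 1e-8   # forward and backward rates balance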
def _get_Cref(t, y0, k, use_mpmath=True):
""" convenience function for generating reference trajectory """
if use_mpmath:
import mpmath as mp
mp.mp.dps = 30 # number of significant figures
y0 = [mp.mpf(_) for _ in y0]
k = [mp.mpf(_) for _ in k]
_exp = np.vectorize(mp.exp)
else:
def _exp(x):
return np.exp(_algebraic_sigmoid(np.asarray(x), 8, 350))
A, B, C = y0
kf, kb = k
x = analytic_x(t, A, B, C, kf, kb, _exp).reshape((t.size, 1))
dy = np.hstack((-x, -x, x))
res = y0 + dy
if use_mpmath:
res = np.array(res, dtype=np.float64)
return res
def integrate_rd(
tend=1.9, A0=4.2, B0=3.1, C0=1.4, nt=100, t0=0.0, kf=0.9, kb=0.23,
atol='1e-7,1e-6,1e-5', rtol='1e-6', integrator='scipy', method='bdf',
logy=False, logt=False, num_jac=False, plot=False, savefig='None',
splitplots=False, plotlogy=False, plotsymlogy=False, plotlogt=False,
scale_err=1.0, scaling=1.0, verbose=False):
"""
Runs the integration and (optionally) plots:
- Individual concentrations as function of time
- Reaction Quotient vs. time (with equilibrium constant as reference)
- Numerical error committed (with tolerance span plotted)
- Excess error committed (deviation outside tolerance span)
Concentrations (A0, B0, C0) are taken to be in "M" (molar),
kf in "M**-1 s**-1" and kb in "s**-1", t0 and tend in "s"
"""
rtol = float(rtol)
atol = list(map(float, atol.split(',')))
if len(atol) == 1:
atol = atol[0]
registry = SI_base_registry.copy()
registry['amount'] = 1.0/scaling*registry['amount']
registry['length'] = registry['length']/10 # decimetre
kf = kf/molar/second
kb = kb/second
rd = ReactionDiffusion.nondimensionalisation(
3, [[0, 1], [2]], [[2], [0, 1]], [kf, kb], logy=logy, logt=logt,
unit_registry=registry)
C0 = np.array([A0, B0, C0])*molar
if plotlogt:
eps = 1e-16
tout = np.logspace(np.log10(t0+eps), np.log10(tend+eps), nt)*second
else:
tout = np.linspace(t0, tend, nt)*second
integr = Integration(
rd, C0, tout, integrator=integrator, atol=atol, rtol=rtol,
with_jacobian=not num_jac, method=method)
Cout = integr.with_units('Cout')
yout, info = integr.yout, integr.info
try:
import mpmath
assert mpmath # silence pyflakes
except ImportError:
use_mpmath = False
else:
use_mpmath = True
time_unit = get_derived_unit(registry, 'time')
conc_unit = get_derived_unit(registry, 'concentration')
Cref = _get_Cref(
to_unitless(tout - tout[0], time_unit),
to_unitless(C0, conc_unit),
[to_unitless(kf, 1/time_unit/conc_unit),
to_unitless(kb, 1/time_unit)],
use_mpmath
).reshape((nt, 1, 3))*conc_unit
if verbose:
print(info)
if plot:
npltcols = 3 if splitplots else 1
import matplotlib.pyplot as plt
plt.figure(figsize=(18 if splitplots else 6, 10))
def subplot(row=0, idx=0, adapt_yscale=True, adapt_xscale=True,
span_all_x=False):
offset = idx if splitplots else 0
ax = plt.subplot(4, 1 if span_all_x else npltcols,
1 + row*npltcols + offset)
if adapt_yscale:
if plotlogy:
ax.set_yscale('log')
elif plotsymlogy:
ax.set_yscale('symlog')
if adapt_xscale and plotlogt:
ax.set_xscale('log')
return ax
tout_unitless = to_unitless(tout, second)
c = 'rgb'
for i, l in enumerate('ABC'):
# Plot solution trajectory for i:th species
ax_sol = subplot(0, i)
ax_sol.plot(tout_unitless, to_unitless(Cout[:, 0, i], molar),
label=l, color=c[i])
if splitplots:
# Plot relative error
ax_relerr = subplot(1, 1)
ax_relerr.plot(
tout_unitless, Cout[:, 0, i]/Cref[:, 0, i] - 1.0,
label=l, color=c[i])
ax_relerr.set_title("Relative error")
ax_relerr.legend(loc='best', prop={'size': 11})
# Plot absolute error
ax_abserr = subplot(1, 2)
ax_abserr.plot(tout_unitless, Cout[:, 0, i]-Cref[:, 0, i],
label=l, color=c[i])
ax_abserr.set_title("Absolute error")
ax_abserr.legend(loc='best', prop={'size': 11})
# Compute the error and the tolerance-weighted scale used for the error plots below
linE = Cout[:, 0, i] - Cref[:, 0, i]
try:
atol_i = atol[i]
except:
atol_i = atol
wtol_i = (atol_i + rtol*yout[:, 0, i])*get_derived_unit(
rd.unit_registry, 'concentration')
if np.any(np.abs(linE/wtol_i) > 1000):
# Plot true curve in first plot when deviation is large enough
# to be seen visually
ax_sol.plot(tout_unitless, to_unitless(Cref[:, 0, i], molar),
label='true '+l, color=c[i], ls='--')
ax_err = subplot(2, i)
plot_solver_linear_error(integr, Cref, ax_err, si=i,
scale_err=1/wtol_i, color=c[i], label=l)
ax_excess = subplot(3, i, adapt_yscale=False)
plot_solver_linear_excess_error(integr, Cref, ax_excess,
si=i, color=c[i], label=l)
# Plot Reaction Quotient vs time
ax_q = subplot(1, span_all_x=False, adapt_yscale=False,
adapt_xscale=False)
Qnum = Cout[:, 0, 2]/(Cout[:, 0, 0]*Cout[:, 0, 1])
Qref = Cref[:, 0, 2]/(Cref[:, 0, 0]*Cref[:, 0, 1])
ax_q.plot(tout_unitless, to_unitless(Qnum, molar**-1),
label='Q', color=c[i])
if np.any(np.abs(Qnum/Qref-1) > 0.01):
# If more than 1% error in Q, plot the reference curve too
ax_q.plot(tout_unitless, to_unitless(Qref, molar**-1),
'--', label='Qref', color=c[i])
# Plot the equilibrium constant K = kf/kb as a horizontal reference line
ax_q.plot((tout_unitless[0], tout_unitless[-1]),
[to_unitless(kf/kb, molar**-1)]*2,
'--k', label='K')
ax_q.set_xlabel('t')
ax_q.set_ylabel('[C]/([A][B]) / M**-1')
ax_q.set_title("Transient towards equilibrium")
ax_q.legend(loc='best', prop={'size': 11})
for i in range(npltcols):
subplot(0, i, adapt_yscale=False)
plt.title('Concentration vs. time')
plt.legend(loc='best', prop={'size': 11})
plt.xlabel('t')
plt.ylabel('[X]')
subplot(2, i, adapt_yscale=False)
plt.title('Absolute error in [{}](t) / wtol'.format('ABC'[i]))
plt.legend(loc='best')
plt.xlabel('t')
ttl = '|E_i[{0}]|/(atol_i + rtol*(y0_i+yf_i)/2'
plt.ylabel(ttl.format('ABC'[i]))
plt.tight_layout()
subplot(3, i, adapt_yscale=False)
ttl = 'Excess error in [{}](t) / integrator linear error span'
plt.title(ttl.format(
'ABC'[i]))
plt.legend(loc='best')
plt.xlabel('t')
plt.ylabel('|E_excess[{0}]| / e_span'.format('ABC'[i]))
plt.tight_layout()
save_and_or_show_plot(savefig=savefig)
return yout, to_unitless(Cref, conc_unit), rd, info
if __name__ == '__main__':
argh.dispatch_command(integrate_rd, output_file=None)
|
bsd-2-clause
|
edgarcosta92/ns3-dev
|
scripts/thomas/parse_rtt.py
|
1
|
1147
|
import csv
import numpy as np
import matplotlib.pyplot as plt
file_format = {"src":0, "dst": 1, "src_port": 2, "dst_port":3, "rtt":6}
class ParseRTT(object):
def __init__(self,file_name, file_format = file_format, delimiter = " "):
self._delimiter = delimiter
self._file_format = file_format
self.load_file(file_name)
def load_file(self, file_name):
file = open(file_name, 'rb')
self._raw = csv.reader(file, delimiter=self._delimiter)
def run(self):
rtts = []
for x in self._raw:
try:
rtts.append(float(x[self._file_format["rtt"]]))
except:
continue
return rtts
def get_cdf(self, data):
sorted_data = sorted(data)
y = 1. * np.arange(len(data))
y = [x / (len(data) - 1) for x in y]
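# (editor's note, added comment) y[i] = i/(n-1) is the empirical CDF value
# assigned to the i-th smallest RTT; plotting it against sorted_data below
# gives the ECDF curve.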
##y = comulative_prob[:]
color = ["r--", "g--", "b--", "y--", "k--"]
plt.plot(sorted_data, y, color[0])
plt.xscale('log')
#plt.savefig("fct_" + test_name.replace("/", ""))
plt.show()
A = ParseRTT("rtt.txt", delimiter="\t")
A.get_cdf(A.run())
|
gpl-2.0
|
lionelliang/PairTradingSpark
|
checkpairtradingSimultneously.py
|
1
|
12571
|
#coding:utf-8
import os
import time
import csv
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.tsa.stattools as sts
TABLE_STOCKS_BASIC = 'stock_basic_list'
DownloadDir = './stockdata/'
DATA_TIME = 'running-time'
#date example 2011/10/13
tudateparser = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')
weightdict = {} #previous weight dict
# Function: read a dictionary from a CSV file
# Input: file name, keyIndex, valueIndex
def readDictCSV(fileName="", dataDict = {}):
if not os.path.exists(fileName) :
return
with open(fileName, "r") as csvFile:
reader = csv.reader(csvFile)
for row in reader:
dataDict[str(row[0])] = [float(row[1]), float(row[2])]
csvFile.close()
return dataDict
# Function: write a dictionary to a CSV file
# Input: file name, data dictionary
def writeDictCSV(fileName="", dataDict={}):
with open(fileName, "wb") as csvFile:
csvWriter = csv.writer(csvFile)
for k,v in dataDict.iteritems():
csvWriter.writerow([str(k), v[0], v[1]])
csvFile.close()
'''
Linear regression with the stochastic gradient descent (SGD) method
'''
def linregSGD(x, y, a, b):
# ----------------------------- stochastic gradient descent -----------------------------
# Two termination conditions
loop_max = 10000 # maximum number of iterations (guards against an endless loop)
epsilon = 1e-6
alpha = 0.001 # step size (too large causes oscillation, too small slows convergence)
diff = 0.
errorA = a
errorB = b
count = 0 # iteration counter
finish = False # termination flag
m = len(x) # number of training data points
while count < loop_max:
#count += 1
# iterate over the training set, updating the weights as we go
for i in range(m):
count += 1
diff = a + b * x[i] - y[i] # plug in the training sample and compute the error
# stochastic gradient descent: each weight update uses a single training sample
a = a - alpha * diff
b = b - alpha * diff * x[i]
if ((a-errorA)*(a-errorA) + (b-errorB)*(b-errorB)) < epsilon:
# termination condition: the change in the weight vector between passes is sufficiently small
finish = 1
break
else:
errorA = a
errorB = b
if finish == True: # break out of the outer loop
break
#print 'loop count = %d' % count, '\tweight:[%f, %f]' % (a, b)
return finish, a, b
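# Editor's note: a minimal calling sketch for linregSGD, added for illustration
# only; it is not part of the original script and is never invoked. It fits
# y ~ a + b*x on a small synthetic series using the module-level numpy import.
def _linregSGD_example():
    np.random.seed(0)
    x = np.linspace(0.0, 1.0, 30)
    y = 0.5 + 2.0*x + 0.01*np.random.randn(30)  # true intercept 0.5, slope 2.0
    # finish is True only if successive passes changed (a, b) by less than epsilon
    finish, a, b = linregSGD(x, y, a=0.0, b=0.0)
    print "finished:", finish, "intercept:", a, "slope:", b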
def adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b):
if len(closeprice_of_1) != 0 and len(closeprice_of_2) != 0:
finish, alpha, beta = linregSGD(x=closeprice_of_1, y=closeprice_of_2, a=a, b=b)
if not finish:
return False, a, b
spread = closeprice_of_2 - closeprice_of_1*beta - alpha
spread.dropna()
adfstat, pvalue, usedlag, nobs, critvalues, icbest = sts.adfuller(x=spread)
return adfstat < critvalues['5%'], alpha, beta
else:
return False, 0, 0
'''
print adfstat
for(k, v) in critvalues.items():
print k, v
'''
def adfuller_check_smols(closeprice_of_1, closeprice_of_2):
if len(closeprice_of_1) != 0 and len(closeprice_of_2) != 0:
X = sm.add_constant(closeprice_of_1)
model = sm.OLS(endog=closeprice_of_2, exog=X)
result = model.fit()
#print result.summary()
print result.params
spread = result.resid
adfstat, pvalue, usedlag, nobs, critvalues, icbest = sts.adfuller(x=spread)
return adfstat < critvalues['5%']
'''
print adfstat
for(k, v) in critvalues.items():
print k, v
'''
'''
spread2 = closeprice_of_2 - closeprice_of_1*result.params.closel
sta2 = sts.adfuller(spread, 1)
print sta2
'''
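# Editor's note: an added illustration of how the cointegration check above is
# intended to be used; not part of the original script and never invoked. Two
# synthetic prices sharing a common random-walk component should normally pass
# the ADF test on the OLS residuals, so adfuller_check_smols returns True.
def _cointegration_example():
    np.random.seed(1)
    common = np.cumsum(np.random.randn(250))            # shared non-stationary trend
    series1 = pd.Series(10.0 + common + 0.1*np.random.randn(250))
    series2 = pd.Series(5.0 + 0.8*common + 0.1*np.random.randn(250))
    print "cointegrated:", adfuller_check_smols(series1, series2)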
def compare_algorithm(code1, code2, start_date = '2013-10-10', end_date = '2014-09-30'):
closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
print "trading days:", len(closeprice_of_1)
if len(closeprice_of_1)<=1 or len(closeprice_of_2)<=1:
return
time1 = time.time()
result = adfuller_check_smols(closeprice_of_1, closeprice_of_2)
time2 = time.time()
print "smols running time(s): ", time2-time1
time3 = time.time()
a = -1
b = -1
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
time4 = time.time()
print "sgd running time(s): ", time4-time3
time7 = time.time()
#a = 0
#b = 0
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
time8 = time.time()
print "sgd00 running time(s): ", time8-time7
time5 = time.time()
#a = 0.189965
#b = 0.4243
if weightdict.has_key(code1+code2): # get previous weight
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
#print weightdict[code1+code2]
else:
#print "not find w"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
weightdict[code1+code2] = [result[1], result[2]]
time6 = time.time()
print "sgdmiddle running time(s): ", time6-time5
time9 = time.time()
if weightdict.has_key(code1+code2):
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
else:
print "weight not found"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
weightdict[code1+code2] = [result[1], result[2]]
time10 = time.time()
print "sgdsavedvalue running time(s): ", time10-time9
def adfuller_check_price_sgd(code1, code2, linrreg="SMOLS", start_date = '2013-10-10', end_date = '2014-09-30'):
closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
if linrreg == "SMOLS" :
result = adfuller_check_smols(closeprice_of_1, closeprice_of_2)
elif linrreg == "SGD" :
a = 0
b = 0
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
elif linrreg == "SGDMiddle" :
if weightdict.has_key(code1+code2):
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
else:
print "weight not found"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
weightdict[code1+code2] = [result[1], result[2]]
else :
result = ""
return result
def load_process(code1, code2, start_date, end_date):
m = str(code1)
n = str(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
if not os.path.exists(file1) or not os.path.exists(file2):
return {}, {}
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:start_date]
price_of_2 = kline2[end_date:start_date]
# regroup quotation according to date index
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
return closeprice_of_1, closeprice_of_2
def adfuller_check_general(closeprice_of_1, closeprice_of_2, code1, code2, linrreg="SMOLS"):
if linrreg == "SMOLS" :
result = adfuller_check_smols(closeprice_of_1, closeprice_of_2)
elif linrreg == "SGD" :
a = 0
b = 0
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
elif linrreg == "SGDMiddle" :
if weightdict.has_key(code1+code2):
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
else:
print "weight not found"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
weightdict[code1+code2] = [result[1], result[2]]
elif linrreg == "SGDRegress" :
a = 0
b = 0
result = linregSGD(x=closeprice_of_1, y=closeprice_of_2, a=a, b=b)
elif linrreg == "SGDRegressMiddle" :
if weightdict.has_key(code1+code2):
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
else:
print "weight not found"
np.random.seed(2)
a, b = np.random.randn(2)
result = linregSGD(x=closeprice_of_1, y=closeprice_of_2, a=a, b=b)
weightdict[code1+code2] = [result[1], result[2]]
else :
result = ""
return result
def simulate_check_5days(close1, close2, code1, code2):
# check every 5 days
# period = 250 working days in a year
jump = 5
period = 250
if close1.count() < period:
return
time1 = time.time()
timerowlist = []
index_start = 0
index_end = index_start + period
while index_end < close1.count():
#index_start = index_end - jump -10 # commented out in the non-optimised version
part_close1 = close1[index_start:index_end].reset_index(drop=True)
part_close2 = close2[index_start:index_end].reset_index(drop=True)
bRet = adfuller_check_general(part_close1, part_close2, code1, code2, "SGDRegress") #SGDMiddle, SGD, SGDRegressMiddle, SGDRegress
timerowlist.append(time.time()-time1)
index_end += jump
print index_end/jump
timeDF = pd.DataFrame(timerowlist)
timeDF.to_csv(DATA_TIME + '.csv', header=False, index=False)
def adfuller_check_price(code1, code2, start_date = '2013-10-10', end_date = '2014-09-30'):
m = str(code1)
n = str(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
if not os.path.exists(file1) or not os.path.exists(file2):
return
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:]
price_of_2 = kline2[end_date:]
# regroup quotation according to date index
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)[0:1500]
closeprice_of_2 = combination['closer'].reset_index(drop=True)[0:1500]
return simulate_check_5days(closeprice_of_1, closeprice_of_2, code1, code2)
def adfuller_check2(df):
adfuller_check_price(df[0], df[1])
def check_all_dir():
print 'starting adf checking'
stock_list = pd.read_csv(TABLE_STOCKS_BASIC + '.csv', dtype=str)
code = stock_list['code']
reindexed_code = code.reset_index(drop=True)
reindexed_code = reindexed_code[100:200]
reindexed_code = reindexed_code.reset_index(drop=True)
stockPool = pd.DataFrame(columns=['code1','code2'])
print len(reindexed_code)
for i in range(len(reindexed_code)):
for j in range(i+1, len(reindexed_code)):
stockPool = stockPool.append({'code1':str(reindexed_code[i]), \
'code2':str(reindexed_code[j])}, ignore_index=True)
stockPool.apply(adfuller_check2, axis=1)
## Main functionality
def main():
time1 = time.time()
#adfuller_check_price("601002", "600815")
# check all stock pairings in the list book
#check_all_dir()
#adfuller_check_price_sgd("601002", "600815", linrreg="SMOLS", start_date = '2013-10-10', end_date = '2014-09-30') #"SGD")
readDictCSV("Linrgre.csv", weightdict)
#compare_algorithm("601002", "600815",start_date = '2013-10-10', end_date = '2014-09-30') #2014-07-30 trading days: 192; 2014-09-30 trading days: 233
#compare_algorithm("601002", "600815", start_date = '2013-10-10', end_date = '2014-09-30') #2014-07-30 trading days: 192; 2014-09-30 trading days: 233
adfuller_check_price("601002", "600815", start_date = '2013-10-10', end_date = '2016-05-30')
writeDictCSV("Linrgre.csv", weightdict)
time2 = time.time()
print "running time(s): ", time2-time1
if __name__ == "__main__":
# Execute Main functionality
main()
|
gpl-2.0
|
mprhode/malware-prediction-rnn
|
experiments/Experiments.py
|
1
|
33646
|
from copy import deepcopy
import numpy as np
import random
import operator
from itertools import combinations
import csv
import time
import os
import gc
import sys
import inspect
from keras.callbacks import EarlyStopping
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix
from .RNN import generate_model
from .useful import *
random.seed(12)
np.random.seed(12)
class Experiment():
"""docstring for Experiment"""
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5, model_type="rnn"):
assert (type(folds) is int) or (data == None), "folds must be an integer if data tuple is provided"
assert (search_algorithm.lower() == "grid") or (search_algorithm.lower() == "random"), "Only 'grid' and 'random' permissible values for search_algorithm"
self.headers = [
'memory',
'tx_packets',
'rx_bytes',
'swap',
'rx_packets',
'cpu_sys',
'total_pro',
'cpu_user',
'max_pid',
'tx_bytes']
#hyperparameters
self.original_h_params = parameters
self.h_params = parameters
# set up parameter search space depending on algorithm
self.search_algorithm = search_algorithm
self.current_params = {}
if self.search_algorithm == "grid":
self.h_params = dict([(key, list(self.h_params[key])) for key in self.h_params])
self.original_h_params = deepcopy(self.h_params)
self.current_params = dict([(key, self.h_params[key][0]) for key in self.h_params])
if self.search_algorithm == "random":
self.__list_to_dict_params() # convert list-valued parameters to dicts with uniform probabilities
self.__map_to_0_1() # normalise the probabilities and convert them to cumulative thresholds
# metrics and writer objects, to be assigned when first experiment written up
self.folder_name = check_filename(folder_name)
self.experiment_id = 0
self.metrics_headers = None
self.metrics_writer = None
#Model type
self.model_type = model_type
# Thresholding set up
self.thresholding = thresholding
if self.thresholding:
self.min_threshold = threshold + K.epsilon()
self.temp_min_threshold = threshold + K.epsilon()
# test-train experiment
if (data == None) or (folds == None):
self.folds = None
self.X_TRAIN = x_train
self.Y_TRAIN = y_train
self.X_TEST = x_test
self.Y_TEST = y_test
print("Test-train experiment")
# k-fold cross-validation experiment
else:
assert folds != None, "Supply number of folds for k-fold cross validation or supply x_train, y_train, x_test, y_test"
self.folds = folds
self.x = data[0]
self.y = data[1]
print(self.folds, "- fold cross validation experiment")
def __list_to_dict_params(self):
for key in self.h_params:
if type(self.h_params[key]) is list:
self.h_params[key] = dict([(x, 1/(len(self.h_params[key]) + K.epsilon() )) for x in self.h_params[key]])
def __map_to_0_1(self):
"""maps input probabilities to values between 0 and 1, preserving scalar relationships"""
for key in self.h_params:
running_total = 0
scalar = 1/(sum(self.h_params[key].values()))
for possible_value in self.h_params[key]:
if self.h_params[key][possible_value] < 0:
raise ValueError("Negative hyperparameter probabilities are not allowed ({} for {})").format(self.h_params[key][possible_value], possible_value)
new_value = self.h_params[key][possible_value] * scalar
self.h_params[key][possible_value] = new_value + running_total
running_total += new_value
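# Editor's note (added comment): a worked example of the cumulative thresholds
# produced above. Given a hypothetical h_params["units"] = {32: 2, 64: 1, 128: 1},
# the scalar is 1/4, so the stored values become {32: 0.5, 64: 0.75, 128: 1.0}.
# __random_config below draws a uniform number in [0, 1) and picks the first
# option (in ascending threshold order) whose threshold exceeds the draw,
# i.e. 32 with probability 0.5, and 64 and 128 with probability 0.25 each.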
def __random_config(self):
"""randomly generate a configuration of hyperparameters from dictionary self.h_params"""
for key in self.h_params:
choice = random.random()
sorted_options = sorted(self.h_params[key].items(), key=operator.itemgetter(1))
for option in sorted_options:
if choice < option[1]:
self.current_params[key] = option[0]
break
if self.current_params["optimiser"] == "adam":
self.current_params["learning_rate"] = 0.001
print()
def run_one_experiment(self):
print("run one experiment - orig")
#Get new configuration if random search
if self.search_algorithm == "random":
self.__random_config()
self.experiment_id += 1
print("running expt", self.experiment_id, "of", self.num_experiments)
print(self.current_params)
self.metrics = {}
self.current_fold = 1
self.accuracy_scores = []
# k-fold cross-validation
if self.folds != None:
y = deepcopy(self.y)
x = deepcopy(self.x)
#remove short sequences and store indices of kept items
x, y, identifiers = remove_short_idx(x, y, list(range(len(y))), self.current_params["sequence_length"])
labels = {}
temp_y = deepcopy(y).flatten() # get labels as flat array to find stratified folds
for i, class_label in zip(identifiers, temp_y):
if class_label in labels:
labels[class_label].append(i)
else:
labels[class_label] = [i]
#split into number of folds
fold_indicies = [[] for x in range(self.folds)]
# Want to represent the class distribution in each set (stratified split)
# Divide each class's index list into one chunk per fold
for key in labels:
labels[key] = to_chunks(labels[key], self.folds)
for i, fold_ids in enumerate(labels[key]):
fold_indicies[i] += fold_ids
fold_indicies = np.array([[int(x) for x in index_set] for index_set in fold_indicies])
#take new copies of the original to use indices from the original sets
x, y = deepcopy(self.x), deepcopy(self.y)
for i in list(range(self.folds)):
test = np.array([(i) % self.folds]) # one fold is test set
train = np.array([i for i in range(self.folds) if i not in [test]]) # remaining folds are the training set (i.e. not the test fold)
test_idxs = np.concatenate(tuple([fold_indicies[i] for i in test]))
train_idxs = np.concatenate(tuple([fold_indicies[i] for i in train]))
self.x_train, self.y_train = truncate_and_tensor(x[train_idxs], y[train_idxs], self.current_params["sequence_length"])
self.x_test, self.y_test = truncate_and_tensor(x[test_idxs], y[test_idxs], self.current_params["sequence_length"])
self.test_idxs = test_idxs
stop = self.set_up_model()
if stop:
return
self.current_fold += 1
# test-train
else:
self.x_train = deepcopy(self.X_TRAIN)
self.y_train = deepcopy(self.Y_TRAIN)
self.x_test = deepcopy(self.X_TEST)
self.y_test = deepcopy(self.Y_TEST)
self.test_idxs = np.array(range(1, len(self.y_test) + 1)) / 10 #divide by 10 to distinguish from training/10-fold data
# remove short sequences - store indicies for test data
self.x_train, self.y_train = remove_short(self.x_train, self.y_train, self.current_params["sequence_length"])
self.x_test, self.y_test, self.test_idxs = remove_short_idx(self.x_test, self.y_test, self.test_idxs, self.current_params["sequence_length"])
self.set_up_model()
def set_up_model(self):
# Leave out feature if specified in dictionary
if "leave_out_feature" in self.current_params:
print("Omitting feature:", self.headers[self.current_params["leave_out_feature"]])
self.x_train = np.delete(self.x_train, self.current_params["leave_out_feature"], 2)
self.x_test = np.delete(self.x_test, self.current_params["leave_out_feature"], 2)
#Shuffle data
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
self.x_test, self.y_test, self.test_idxs = unison_shuffled_copies([self.x_test, self.y_test, self.test_idxs])
#scale data by the training data mean and variance
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = scale_array(self.x_test, means, stdvs)
#Output size - in future delete any cols for categorical which are all zero
print("train, test set size (x):", self.x_train.shape, self.x_test.shape)
model = generate_model(self.x_train, self.y_train, self.current_params, model_type=self.model_type)
#if self.current_fold == 1:
# print(model.summary())
return self.train_model(model) #Returns TRUE if accuracy below threshold
def train_model(self, model):
"""run one fold and write up results"""
print(" fold ", self.current_fold, "of", self.folds)
metrics = self.metrics
reset_states = ResetStatesCallback()
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=8, verbose=0, mode='auto')
start_train = time.time()
h = model.fit(
self.x_train, self.y_train,
batch_size=self.current_params["batch_size"],
epochs=self.current_params["epochs"],
shuffle=True,
verbose=0,
callbacks=[reset_states])
end_train = time.time()
metrics["train_acc"] = h.history["acc"]
start_test = time.time()
pred_Y = model.predict(self.x_test, batch_size=self.current_params["batch_size"])
metrics["preds"] = [x[0] for x in pred_Y]
end_test = time.time()
metrics["truth"] = self.y_test.flatten().tolist()
metrics["categorical_preds"] = [np.round(x) for x in metrics["preds"]]
metrics["fscore"] = f1_score(metrics["truth"], metrics["categorical_preds"])
metrics["accuracy"] = accuracy_score(metrics["truth"], metrics["categorical_preds"])
metrics["experiment_id"] = self.experiment_id
metrics["training_size"] = len(self.y_train)
metrics["test_size"] = len(self.y_test)
metrics["train_time"] = end_train - start_train
metrics["test_time"] = end_test - start_test
metrics["fold_id"] = self.current_fold
metrics["test_idxs"] = self.test_idxs
tn, fp, fn, tp = confusion_matrix(metrics["truth"], metrics["categorical_preds"]).ravel()
self.metrics["tp"] = tp/metrics["truth"].count(1)
self.metrics["tn"] = tn/metrics["truth"].count(0)
self.metrics["fp"] = fp/metrics["truth"].count(0)
self.metrics["fn"] = fn/metrics["truth"].count(1)
if not self.metrics_headers:
#Create files and Write file headers
os.mkdir(self.folder_name)
self.metrics_headers = list(metrics.keys()) + list(self.current_params.keys())
self.metrics_file = open("{}/results.csv".format(self.folder_name), "w")
self.metrics_writer = csv.DictWriter(self.metrics_file, fieldnames=self.metrics_headers)
self.metrics_writer.writeheader()
#Write up metric results
self.metrics_writer.writerow(merge_two_dicts(self.current_params, self.metrics))
#Search type changes
print("acc:", metrics["accuracy"], "fscore:", metrics["fscore"])
for x in ['tn', 'fp', 'fn', 'tp']:
print("{}: {}".format(x, self.metrics[x]), end=" ")
print()
""",
"mal%:", np.round(
metrics["truth"].count(1)/len(metrics["truth"]), decimals=2)
,"tp", tp/metrics["truth"].count(1), "tn", tn/metrics["truth"].count(0)
)
"""
#make space in memory
del model
gc.collect()
self.accuracy_scores.append(metrics["accuracy"])
if self.current_fold == self.folds:
average_acc = sum(self.accuracy_scores) / len(self.accuracy_scores)
print("average acc:", average_acc)
if self.thresholding:
if metrics["accuracy"] < self.temp_min_threshold:
return True
#On the last fold: if the average accuracy exceeds the current threshold, update the temporary minimum to the smallest fold accuracy and raise the threshold
elif (self.current_fold == self.folds) and (average_acc > self.min_threshold):
self.temp_min_threshold = min(self.accuracy_scores)
self.min_threshold = average_acc
print("* * * NEW RECORD avg acc:", average_acc, "min acc:", self.temp_min_threshold)
return False # Only return true to stop models running
def run_experiments(self, num_experiments=100):
# GRID SEARCH
#Find total possible configurations from options
self.total = 1
for key in self.original_h_params:
self.total *= len(self.original_h_params[key])
if self.search_algorithm == "grid":
header_list = list(self.h_params.keys()) #Fixed keys list to loop in order
countdown = len(self.h_params) - 1
self.num_experiments = self.total
print("grid search of ", self.total, "configurations...")
self.loop_values(header_list, countdown)
# RANDOM SEARCH
elif self.search_algorithm == "random":
self.num_experiments = num_experiments
print("random search of ", self.num_experiments, "configurations of a possible", self.total, "configurations")
while(self.experiment_id <= self.num_experiments):
self.run_one_experiment()
# Experiments run - close data files
print(self.experiment_id, " models run.")
self.metrics_file.close()
def loop_values(self, header_list, countdown):
# loop through all possible configurations in original parameter dictionary
# http://stackoverflow.com/questions/7186518/function-with-varying-number-of-for-loops-python
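# Editor's note (added comment): this builds one nested loop per hyperparameter;
# e.g. with two keys holding 2 values each, run_one_experiment() is called once
# for each of the 4 possible configurations.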
if (countdown > 0):
for i in self.original_h_params[header_list[countdown]]:
self.current_params[header_list[countdown]] = i
self.loop_values(header_list, countdown - 1)
else:
for i in self.original_h_params[header_list[countdown]]:
self.current_params[header_list[countdown]] = i
self.run_one_experiment()
class Increase_Snaphot_Experiment(Experiment):
"""Experiment to look at change in data snapshot intervals"""
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5, run_on_factors=True, model_type="rnn"):
super(Increase_Snaphot_Experiment, self).__init__(parameters, search_algorithm="grid",
x_test=x_test, y_test=y_test,
x_train=x_train, y_train=y_train,
data=data, folds=folds,
folder_name=folder_name,
thresholding=thresholding, threshold=threshold)
self.run_on_factors = run_on_factors
def set_up_model(self):
# Keep only every step-th data snapshot, do not run unless new data involved
if (self.run_on_factors and ((self.current_params["sequence_length"] - 1) % self.current_params["step"] == 0)) or (self.run_on_factors == False):
self.x_train = self.x_train[:,::self.current_params["step"]]
self.x_test = self.x_test[:,::self.current_params["step"]]
# Shuffle data
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
self.x_test, self.y_test, self.test_idxs = unison_shuffled_copies([self.x_test, self.y_test, self.test_idxs])
# scale data by the training data mean and variance
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = scale_array(self.x_test, means, stdvs)
# Output size - in future delete any cols for categorical which are all zero
model = generate_model(self.x_train, self.y_train,
self.current_params, model_type=self.model_type)
if self.current_fold == 1:
print(model.summary())
#UNINDENT line BELOW
return self.train_model(model) #Returns TRUE if accuracy below threshold
class Ensemble(Experiment):
def write_up_models(self, models, preds):
self.metrics["truth"] = self.y_test.flatten()
self.metrics["preds"] = np.array(preds).max(axis=0)
self.metrics["categorical_preds"] = [np.round(x) for x in self.metrics["preds"]]
self.metrics["fscore"] = f1_score(self.metrics["truth"], self.metrics["categorical_preds"])
self.metrics["accuracy"] = accuracy_score(self.metrics["truth"], self.metrics["categorical_preds"])
writeable = merge_two_dicts(self.current_params, self.metrics)
if not self.metrics_headers:
#Create files and Write file headers
os.mkdir(self.folder_name)
self.metrics_headers = writeable.keys()
self.metrics_file = open("{}/results.csv".format(self.folder_name), "w")
self.metrics_writer = csv.DictWriter(self.metrics_file, fieldnames=self.metrics_headers)
self.metrics_writer.writeheader()
self.metrics_writer.writerow(merge_two_dicts(self.current_params, self.metrics))
#Search type changes
print("acc:", self.metrics["accuracy"])
#make space in memory
for model in models:
del model
gc.collect()
class Ensemble_configurations(Ensemble):
"""Average the predictions of multiple models passed as a list as configurations"""
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5, run_on_factors=True,
model_type="rnn", batch_size=64):
super(Ensemble_configurations, self).__init__(parameters[0],
search_algorithm="grid",
x_test=x_test, y_test=y_test, x_train=x_train, y_train=y_train,
data=data, folds=folds, folder_name=folder_name,
thresholding=thresholding, threshold=threshold, model_type=model_type)
self.search_algorithm = None
# Sequence length is the only shared variable - get distinct values
self.sequence_lengths = set([s for seq_lens in [config["sequence_length"] for config in parameters] for s in seq_lens])
#Only one parameter considered from each config, not a search experiment
if not(all([all([len(config[key]) == 1 for key in config]) for config in parameters])):
"not a search experiment, only one parameter will be used from each configuration"
# dictionary values to list
self.configurations = []
for config in parameters:
config = dict([(key, list(config[key])[0]) for key in config])
self.configurations.append(config)
        #Can set all batch sizes the same if specified
        if batch_size:
            for config in self.configurations:
                config["batch_size"] = batch_size
def set_up_model(self):
# Shuffle data
#self.x_train, self.y_train, self.x_val, self.y_val = extract_val_set_binary(self.x_train, self.y_train, 0.1)
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
self.x_test, self.y_test, self.test_idxs = unison_shuffled_copies([self.x_test, self.y_test, self.test_idxs])
#self.x_val, self.y_val = unison_shuffled_copies([self.x_val, self.y_val])
        # scale data by training data mean and standard deviation
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = scale_array(self.x_test, means, stdvs)
#self.x_val = scale_array(self.x_val, means, stdvs)
# Output size - in future delete any cols for categorical which are all zero
models = [generate_model(
self.x_train, self.y_train, config, model_type=self.model_type
) for config in self.configurations]
reset_states = ResetStatesCallback()
preds = []
#early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=, verbose=0, mode='auto')
print("train, test set size (x):", self.x_train.shape, self.x_test.shape)
print("train mal:",self.y_train.flatten().tolist().count(1) , "ben:",self.y_train.flatten().tolist().count(0) , "test mal:", self.y_test.flatten().tolist().count(1), "ben:", self.y_test.flatten().tolist().count(0))
for model, config in zip(models, self.configurations):
h = model.fit(
self.x_train, self.y_train,
batch_size=config["batch_size"],
epochs=config["epochs"],
shuffle=True,
verbose=0,
callbacks=[reset_states]
)
d = config["description"]
self.metrics["train_acc_{}".format(d)] = h.history["acc".format(d)]
pred_Y = model.predict(self.x_test, batch_size=config["batch_size"])
preds.append(pred_Y)
#self.metrics["val_preds_{}"] = model.predict(self.x_val, batch_size=config["batch_size"]).flatten().tolist()
#self.metrics["val_truth"] = self.y_val.flatten().tolist()
self.metrics["test_idxs"] = self.test_idxs
self.metrics["preds_{}".format(d)] = pred_Y.flatten()
self.metrics["acc_{}".format(d)] = accuracy_score(
self.y_test.flatten().round(),
self.metrics["preds_{}".format(d)].round()
)
print(d, "acc :", self.metrics["acc_{}".format(d)], "f1", f1_score(self.y_test.flatten().round(),
self.metrics["preds_{}".format(d)].round()))
self.write_up_models(models, preds)
return False
def run_experiments(self):
# Only changeable parameter is sequence length
self.num_experiments = len(self.sequence_lengths)
for s in self.sequence_lengths:
self.current_params = {"sequence_length": s}
self.run_one_experiment()
# Experiments run - close data files
print(self.experiment_id, " models run.")
self.metrics_file.close()
class Ensemble_sub_sequences(Ensemble):
"""Ensemble models for sub-seqeunces of data"""
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5):
super(Ensemble_sub_sequences, self).__init__(parameters, search_algorithm="grid", x_test=x_test, y_test=y_test, x_train=x_train, y_train=y_train, data=data, folds=folds, folder_name=folder_name, thresholding=thresholding, threshold=threshold)
self.search_algorithm = None
# Sequence length is the only shared variable - get distinct values
self.sequence_lengths = self.h_params["sequence_length"]
def set_up_model(self):
# Shuffle data
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
self.x_test, self.y_test, self.test_idxs = unison_shuffled_copies([self.x_test, self.y_test, self.test_idxs])
        # scale data by training data mean and standard deviation
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = scale_array(self.x_test, means, stdvs)
# Output size - in future delete any cols for categorical which are all zero
models = []
reset_states = ResetStatesCallback()
metrics = self.metrics
preds = []
training_sets = []
testing_sets = []
idx_len_tuples = []
#Create multiple training sets
for length in list(range(1, self.current_params["sequence_length"])):
mini_train_X, b = into_sliding_chunk_arrays(self.x_train, length)
mini_test_X, b = into_sliding_chunk_arrays(self.x_test, length)
idx_len_tuples += b
training_sets += mini_train_X #append for merge2
testing_sets += mini_test_X
#Finally add whole sets
training_sets.append(self.x_train)
testing_sets.append(self.x_test)
idx_len_tuples.append((0, self.current_params["sequence_length"]))
for i, train_set in enumerate(training_sets):
d = idx_len_tuples[i]
self.x_train = train_set
self.x_test = testing_sets[i]
#Output size - in future delete any cols for categorical which are all zero
model = generate_model(self.x_train, self.y_train, self.current_params)
h = model.fit(self.x_train, self.y_train,
batch_size=self.current_params["batch_size"],
epochs=self.current_params["epochs"],
verbose=0, shuffle=True, callbacks=[reset_states])
self.metrics["train_acc_{}".format(d)] = h.history["acc"]
pred_Y = model.predict(self.x_test, batch_size=self.current_params["batch_size"])
preds.append(pred_Y)
self.metrics["preds_{}".format(d)] = [x[0] for x in pred_Y]
self.metrics["acc_{}".format(d)] = accuracy_score(self.y_test.flatten(), [np.round(x) for x in self.metrics["preds_{}".format(d)]])
print(d, "acc :", self.metrics["acc_{}".format(d)])
self.write_up_models(models, preds)
return False
def run_experiments(self):
# Only changeable parameter is sequence length
self.num_experiments = len(self.sequence_lengths)
for s in self.sequence_lengths:
self.run_one_experiment()
# Experiments run - close data files
print(self.experiment_id, " models run.")
self.metrics_file.close()
class SlidingWindow(Experiment):
"""Ensemble models for sub-seqeunces of data"""
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5):
super(SlidingWindow, self).__init__(parameters, search_algorithm="grid", x_test=x_test, y_test=y_test, x_train=x_train, y_train=y_train, data=data, folds=folds, folder_name=folder_name, thresholding=thresholding, threshold=threshold)
def run_one_experiment(self):
print("run one experiment - sliding window")
#Get new configuration if random search
if self.search_algorithm == "random":
self.__random_config()
self.experiment_id += 1
print("running expt", self.experiment_id, "of", self.num_experiments)
print(self.current_params)
self.metrics = {}
self.current_fold = 1
self.accuracy_scores = []
# k-fold cross-validation
if self.folds != None:
raise NotImplementedError("Sliding Window has not been implemented for k-fold validation")
# test-train
else:
self.x_train = deepcopy(self.X_TRAIN)
self.y_train = deepcopy(self.Y_TRAIN)
self.x_test = deepcopy(self.X_TEST)
self.y_test = deepcopy(self.Y_TEST)
print("1---", self.x_train.shape)
self.test_idxs = np.array(range(1, len(self.y_test) + 1)) / 10 #divide by 10 to distinguish from training/10-fold data
self.set_up_model()
def set_up_model(self):
print("---", self.x_train.shape)
# Output size - in future delete any cols for categorical which are all zero
models = []
reset_states = ResetStatesCallback()
metrics = self.metrics
preds = []
all_x_train = []
all_y_train = []
testing_x = []
testing_y = []
test_idxs = []
#Get sliding window data
seq_len = self.current_params["sequence_length"]
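        # For every start offset i, the loop below cuts a window of seq_len consecutive snapshots
        # out of each sequence and appends a column holding the absolute snapshot index, e.g. with
        # seq_len=5 and i=2 the window covers snapshots 2..6 and gains the extra column [2, 3, 4, 5, 6].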
for i in list(range(max([len(x) for x in self.x_train]) - seq_len)):
for x, y, store_x, store_y in zip(
[self.x_train, self.x_test],
[self.y_train, self.y_test],
[all_x_train, testing_x],
[all_y_train, testing_y],
):
temp_x = []
temp_y = []
idxs = []
for n in range(len(x)):
if len(x[n][i:seq_len+i]) == seq_len:
new = np.array(x[n][i:seq_len+i])
print(new.shape, np.array([range(i, seq_len+i)]).T.shape)
temp_x.append(
np.concatenate((new, np.array([range(i, seq_len+i)]).T), axis=1)
)
temp_y.append(np.array(y[n]))
if store_x is testing_x:
idxs.append(self.test_idxs[n])
if len(temp_y):
store_x.append(np.array(temp_x))
store_y.append(np.array(temp_y))
if len(idxs):
test_idxs.append(idxs)
# Concatenate training data
self.x_train = np.concatenate(all_x_train)
self.y_train = np.concatenate(all_y_train)
self.x_test = testing_x
self.y_test = testing_y
self.test_idxs = test_idxs
        # scale data by training data mean and standard deviation
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = [scale_array(x, means, stdvs) for x in self.x_test]
print(2, self.x_train.shape, self.y_train.shape)
        # Shuffle training data
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
#Output size - in future delete any cols for categorical which are all zero
print("train, test set size (x):", self.x_train.shape, [x.shape for x in self.x_test])
model = generate_model(self.x_train, self.y_train, self.current_params, model_type=self.model_type)
#if self.current_fold == 1:
# print(model.summary())
return self.train_model(model) #Returns TRUE if accuracy below threshold
def train_model(self, model):
# Train model
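        # (note: the model passed in is discarded and rebuilt from current_params below,
        #  so the model_type chosen in set_up_model is not forwarded)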
model = generate_model(self.x_train, self.y_train, self.current_params)
reset_states = ResetStatesCallback()
start_train = time.time()
h = model.fit(
self.x_train, self.y_train,
batch_size=self.current_params["batch_size"],
epochs=self.current_params["epochs"],
shuffle=True,
verbose=1,
callbacks=[reset_states])
end_train = time.time()
# test model
for i, test_set in enumerate(self.x_test):
self.metrics["train_acc"] = h.history["acc"]
self.metrics["slide_start"] = i
pred_Y = model.predict(test_set, batch_size=self.current_params["batch_size"])
self.metrics["preds"] = [x[0] for x in pred_Y]
metrics = self.metrics
metrics["train_acc"] = h.history["acc"]
metrics["truth"] = self.y_test[i].flatten().tolist()
metrics["categorical_preds"] = [np.round(x) for x in metrics["preds"]]
metrics["fscore"] = f1_score(metrics["truth"], metrics["categorical_preds"])
metrics["accuracy"] = accuracy_score(metrics["truth"], metrics["categorical_preds"])
metrics["experiment_id"] = self.experiment_id
metrics["training_size"] = len(self.y_train)
metrics["test_size"] = len(self.y_test[i])
metrics["train_time"] = end_train - start_train
metrics["test_idxs"] = self.test_idxs
if not self.metrics_headers:
#Create files and Write file headers
os.mkdir(self.folder_name)
self.metrics_headers = list(metrics.keys()) + list(self.current_params.keys())
self.metrics_file = open("{}/results.csv".format(self.folder_name), "w")
self.metrics_writer = csv.DictWriter(self.metrics_file, fieldnames=self.metrics_headers)
self.metrics_writer.writeheader()
#Write up metric results
self.metrics_writer.writerow(merge_two_dicts(self.current_params, self.metrics))
print("acc:", metrics["accuracy"], "fscore:", metrics["fscore"], "start", i, test_set.shape)
del model
gc.collect()
return False
class SlidingWindow_Support(SlidingWindow):
def __init__(self, parameters, search_algorithm="grid",
x_test=None, y_test=None,
x_train=None, y_train=None,
data=None, folds=10,
folder_name=str(time.time()),
thresholding=False, threshold=0.5):
super(SlidingWindow, self).__init__(parameters, search_algorithm="grid", x_test=x_test, y_test=y_test, x_train=x_train, y_train=y_train, data=data, folds=folds, folder_name=folder_name, thresholding=thresholding, threshold=threshold)
def set_up_model(self):
print("---", self.x_train.shape)
# Output size - in future delete any cols for categorical which are all zero
models = []
reset_states = ResetStatesCallback()
metrics = self.metrics
preds = []
testing_x = []
testing_y = []
test_idxs = []
#Get sliding window data
seq_len = self.current_params["sequence_length"]
for i in list(range(max([len(x) for x in self.x_train]) - seq_len)):
for x, y, store_x, store_y in zip(
[self.x_test],
[self.y_test],
[testing_x],
[testing_y],
):
temp_x = []
temp_y = []
idxs = []
for n in range(len(x)):
if len(x[n][i:seq_len+i]) == seq_len:
temp_x.append(np.array(x[n][i:seq_len+i]))
temp_y.append(np.array(y[n]))
idxs.append(self.test_idxs[n])
if len(temp_y):
store_x.append(np.array(temp_x))
store_y.append(np.array(temp_y))
test_idxs.append(idxs)
# Concatenate training data
self.x_train, self.y_train = remove_short(self.x_train, self.y_train, self.current_params["sequence_length"])
self.x_test = testing_x
self.y_test = testing_y
self.test_idxs = test_idxs
        # scale data by training data mean and standard deviation
means, stdvs = get_mean_and_stdv(self.x_train)
self.x_train = scale_array(self.x_train, means, stdvs)
self.x_test = [scale_array(x, means, stdvs) for x in self.x_test]
print(2, self.x_train.shape, self.y_train.shape)
        # Shuffle training data
self.x_train, self.y_train = unison_shuffled_copies([self.x_train, self.y_train])
#Output size - in future delete any cols for categorical which are all zero
print("train, test set size (x):", self.x_train.shape, [x.shape for x in self.x_test])
model = generate_model(self.x_train, self.y_train, self.current_params, model_type=self.model_type)
#if self.current_fold == 1:
# print(model.summary())
return self.train_model(model) #Returns TRUE if accuracy below threshold
class Omit_test_data(Experiment):
def train_model(self, model):
"""run one fold and write up results"""
print(" fold ", self.current_fold, "of", self.folds)
metrics = self.metrics
reset_states = ResetStatesCallback()
if not len(self.y_test):
return
model.fit(
self.x_train, self.y_train,
batch_size=self.current_params["batch_size"],
epochs=self.current_params["epochs"],
verbose=0,
shuffle=True,
callbacks=[reset_states])
headers = [x + "_ON" for x in self.headers[:len(self.x_test[0][0])]]
indicies = list(range(len(headers)))
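        # The loops below evaluate the model with every non-empty subset of features zeroed out:
        # with F features this is 2**F - 1 masks, e.g. F=3 gives 7 ablation patterns.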
for num_missing in range(1, len(indicies)+1):
for subset in combinations(indicies, num_missing):
temp_test_X = deepcopy(self.x_test)
states = [1 for x in range(len(indicies))]
for feature_index in subset:
states[feature_index] = 0
temp_test_X[:,:,feature_index] = 0 # because 0 is the mean of the training data
print(states)
pred_Y = model.predict(temp_test_X, batch_size=self.current_params["batch_size"]) #Predicted value of Y
pred_Y = [x[0] for x in pred_Y]
Y_classes = [np.round(x) for x in pred_Y] #Choose a class
metrics["preds"] = pred_Y
metrics["categorical_preds"] = Y_classes
metrics["truths"] = [int(x[0]) for x in self.y_test]
metrics["variance"] = np.var(pred_Y)
metrics["accuracy"] = accuracy_score(self.metrics["truths"], self.metrics["categorical_preds"])
metrics["fmeasure"] = accuracy_score(self.metrics["truths"], self.metrics["categorical_preds"])
for h in range(len(headers)):
metrics[headers[h]] = states[h]
if not self.metrics_headers:
#Create files and Write file headers
os.mkdir(self.folder_name)
self.metrics_headers = list(metrics.keys()) + list(self.current_params.keys())
self.metrics_file = open("{}/results.csv".format(self.folder_name), "w")
self.metrics_writer = csv.DictWriter(self.metrics_file, fieldnames=self.metrics_headers)
self.metrics_writer.writeheader()
#Write up metric results
self.metrics_writer.writerow(merge_two_dicts(self.current_params, self.metrics))
#Search type changes
print("acc:", metrics["accuracy"], "features on:", str(states))
#make space in memory
del model
gc.collect()
|
apache-2.0
|
soodoku/text-as-data
|
tdm/tdm.py
|
1
|
8426
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = 'r1 (2014/08/16)'
import os
import sys
import csv
csv.field_size_limit(sys.maxint)
import optparse
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
def TDMReport(vec, X, opts):
"""
# TDMReport(tdm)
- Report most frequent, sparse
"""
freq_df = pd.DataFrame({'term': vec.get_feature_names(),
'sum': np.asarray(X.sum(axis=0)).ravel().tolist()})
freq_df['ratio'] = freq_df['sum']/np.sum(freq_df['sum'])
print("Total terms: {0:d}".format(len(vec.vocabulary_)))
print("Most frequent {0:d} terms: ".format(opts.n_freq))
print freq_df.sort('sum', ascending=False).head(opts.n_freq)
print("Most sparse {0:d} terms: ".format(opts.n_sparse))
print freq_df.sort('sum', ascending=True).head(opts.n_sparse)
return freq_df
def CreateTDM(df, opts):
"""
# Document Term Matrix
# CreateTDM(Traindata, labelColumn(s), textColumn, unigrams/bigrams/other?)
- outputs a tdm
- calls TDMReport(tdm)
"""
vec = CountVectorizer(ngram_range=(opts.min_ngram, opts.max_ngram),
max_features=opts.max_features,
stop_words=opts.remove_terms)
X = vec.fit_transform(df[opts.textColumn])
return vec, X
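# Illustrative usage (a sketch mirroring the __main__ block below; `df` and `opts` as parsed there):
#   vec, X = CreateTDM(df, opts)       # X: sparse document-term matrix, shape (n_docs, n_terms)
#   freq = TDMReport(vec, X, opts)     # prints the most frequent / most sparse terms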
def TFIDFReport(vec, X, opts):
freq_df = pd.DataFrame({'term': vec.get_feature_names(),
'sum': np.asarray(X.sum(axis=0)).ravel().tolist()})
freq_df['ratio'] = freq_df['sum']/np.sum(freq_df['sum'])
print("Total terms: {0:d}".format(len(vec.vocabulary_)))
print("Most frequent {0:d} terms: ".format(opts.n_freq))
print freq_df.sort('sum', ascending=False).head(opts.n_freq)
print("Most sparse {0:d} terms: ".format(opts.n_sparse))
print freq_df.sort('sum', ascending=True).head(opts.n_sparse)
return freq_df
def CreateTFIDF(X, opts):
"""
# CreateTFIDF(Traindata, labelColumn, textColumns)
- outputs a tf-idf
- calls tdidfReport(tfidf)
"""
tfidf = TfidfTransformer(norm="l2")
X = tfidf.fit_transform(X)
return X
def RemoveSparse(freq, opts):
"""
# RemoveSparse(tdm, Y)
# Remove sparse terms (terms which are very rare etc.)
- Y is a way to subset tdm
- automatically implement the default Y that is suggested
"""
if opts.remove_n_sparse:
head = freq.sort('sum', ascending=True).head(opts.remove_n_sparse)
n_sparses = head['term'].tolist()
else:
n_sparses = []
return n_sparses
def RemoveFrequent(freq, opts):
"""
# RemoveFrequent(tdm, X)
- Removes frequent terms
- X is a way to subset tdm
- automatically implement the default X that is suggested
"""
if opts.remove_n_freq:
head = freq.sort('sum', ascending=False).head(opts.remove_n_freq)
n_frequents = head['term'].tolist()
else:
n_frequents = []
return n_frequents
def parse_command_line(argv):
"""Command line options parser for the script
"""
usage = "Usage: %prog [options] <CSV input file>"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-t", "--text", action="store",
type="string", dest="textColumn", default="Body",
help="Text column name (default: 'Body')")
parser.add_option("-l", "--labels", action="store",
type="string", dest="labelColumns",
default="Online Section",
help="Label column(s) name (default: 'Online Section')")
parser.add_option("-d", "--delimiter", action="store",
type="string", dest="delimiter", default=";",
help="Delimeter use to split option's value if multiple"
" values (default: ';')")
parser.add_option("--min-ngram", action="store",
type="int", dest="min_ngram", default=1,
help="Minimum ngram(s) (default: 1)")
parser.add_option("--max-ngram", action="store",
type="int", dest="max_ngram", default=2,
help="Maximum ngram(s) (default: 2)")
parser.add_option("--max-features", action="store",
type="int", dest="max_features", default=2**16,
help="Maximum features (default: 2**16)")
parser.add_option("--n-freq", action="store",
type="int", dest="n_freq", default=20,
help="Report most frequent terms (default: 20)")
parser.add_option("--n-sparse", action="store",
type="int", dest="n_sparse", default=20,
help="Report most sparse terms (default: 20)")
parser.add_option("-r", "--remove-terms-file", action="store",
type="string", dest="remove_terms_file", default=None,
help="File name contains terms to be removed"
" (default: None)")
parser.add_option("--remove-n-freq", action="store",
type="int", dest="remove_n_freq", default=0,
help="Top most of frequent term(s) to be removed"
" (default: 0)")
parser.add_option("--remove-n-sparse", action="store",
type="int", dest="remove_n_sparse", default=0,
help="Top most of sparse term(s) to be removed"
" (default: 0)")
parser.add_option("--out-tdm-file", action="store",
type="string", dest="out_tdm_file", default=None,
help="Save output TDM to CSV filename (default: None)")
parser.add_option("--use-tfidf", action="store_true",
dest="use_tfidf", default=False,
help="Use TF-IDF (default: False)")
parser.add_option("--out-tfidf-file", action="store",
type="string", dest="out_tfidf_file", default=None,
help="Save output TF-IDF to CSV filename (default: None)")
return parser.parse_args(argv)
if __name__ == "__main__":
print("{0!s} - {1!s}\n".format(os.path.basename(sys.argv[0]), __version__))
(opts, args) = parse_command_line(sys.argv)
if len(args) < 2:
print("Usage: {0!s} [options] <CSV input file>".format(
os.path.basename(sys.argv[0])))
sys.exit(-1)
print("Options: {0!s}".format(opts))
opts.remove_terms = []
if opts.remove_terms_file:
try:
opts.remove_terms = pd.read_csv(opts.remove_terms_file,
header=None)[0].tolist()
except:
print("WARN: Cannot read remove terms file ({0!s})".format(
opts.remove_terms_file))
print("Reading input file...({0!s})".format(args[1]))
cols = [opts.textColumn] + opts.labelColumns.split(opts.delimiter)
df = pd.read_csv(args[1], usecols=cols)
print("Creating Term Document Matrix...")
vec, X = CreateTDM(df, opts)
freq = TDMReport(vec, X, opts)
n_sparses = RemoveSparse(freq, opts)
n_frequents = RemoveFrequent(freq, opts)
out_df = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
cols = out_df.columns - n_sparses - n_frequents
ext_cols = opts.labelColumns.split(opts.delimiter)
out_df = out_df[cols].join(pd.DataFrame(df[ext_cols]))
if opts.out_tdm_file:
print("Saving TDM output to CSV file... ({0!s})".format(opts.out_tdm_file))
out_df.to_csv(opts.out_tdm_file, index_label='index')
if opts.use_tfidf:
print("Creating TF-IDF Matrix...")
X = CreateTFIDF(X, opts)
freq = TFIDFReport(vec, X, opts)
n_sparses = RemoveSparse(freq, opts)
n_frequents = RemoveFrequent(freq, opts)
out_df = pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
cols = out_df.columns - n_sparses - n_frequents
out_df = out_df[cols].join(pd.DataFrame(df[ext_cols]))
if opts.out_tfidf_file:
print("Saving TF-IDF output to CSV file... ({0!s})".format(
opts.out_tfidf_file))
out_df.to_csv(opts.out_tfidf_file, index_label='index')
print("Done!!!")
|
mit
|
costypetrisor/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate up
to 29.3% of arbitrarily corrupted data (outliers) in the two-dimensional case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
chengsoonong/acton
|
tests/test_integration.py
|
1
|
18921
|
#!/usr/bin/env python3
"""
test_integration
----------------------------------
Integration tests.
"""
import sys
sys.path.append("..")
import os.path
import struct
import unittest
import unittest.mock
import acton.cli
import acton.database
import acton.proto.io
import acton.proto.wrappers
import acton.proto.acton_pb2
from click.testing import CliRunner
import numpy
class TestIntegration(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
pass
def test_classification_passive_txt(self):
"""Acton handles a passive classification task with an ASCII file."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'passive.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'col20'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('passive.pb'))
reader = acton.proto.io.read_protos(
'passive.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_ascii_non_integer_labels(self):
"""Acton handles non-integer labels in an ASCII table."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_str.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'str.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'label'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('str.pb'))
reader = acton.proto.io.read_protos(
'str.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_gpc_non_integer_labels(self):
"""Acton handles non-integer labels with a GPClassifier."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_str.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'str.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'GPC',
'--epochs', '2',
'--label', 'label'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('str.pb'))
reader = acton.proto.io.read_protos(
'str.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_uncrt_non_integer_labels(self):
"""Acton handles non-integer labels with uncertainty sampling."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_str.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'str.pb',
'--recommender', 'UncertaintyRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'label'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('str.pb'))
reader = acton.proto.io.read_protos(
'str.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_pandas_non_integer_labels(self):
"""Acton handles non-integer labels in a pandas table."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas_str.h5'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'str.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'label',
'--pandas-key', 'classification'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('str.pb'))
reader = acton.proto.io.read_protos(
'str.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_h5_non_integer_labels(self):
"""Acton handles non-integer labels in an HDF5 table."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_str.h5'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'str.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'labels',
'--feature', 'features'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('str.pb'))
reader = acton.proto.io.read_protos(
'str.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_passive_pandas(self):
"""Acton handles a passive classification task with a pandas table."""
pandas_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas.h5'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', pandas_path,
'-o', 'passive.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'col20',
'--pandas-key', 'classification'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('passive.pb'))
reader = acton.proto.io.read_protos(
'passive.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_passive_fits(self):
"""Acton handles a passive classification task with a FITS table."""
fits_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification.fits'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', fits_path,
'-o', 'passive.pb',
'--recommender', 'RandomRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'col20'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('passive.pb'))
reader = acton.proto.io.read_protos(
'passive.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_uncertainty(self):
"""Acton handles a classification task with uncertainty sampling."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'uncertainty.pb',
'--recommender', 'UncertaintyRecommender',
'--predictor', 'LogisticRegression',
'--epochs', '2',
'--label', 'col20'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('uncertainty.pb'))
reader = acton.proto.io.read_protos(
'uncertainty.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
def test_classification_qbc(self):
"""Acton handles a classification task with QBC and a LR committee."""
txt_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification.txt'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.main,
['--data', txt_path,
'-o', 'qbc.pb',
'--recommender', 'QBCRecommender',
'--predictor', 'LogisticRegressionCommittee',
'--epochs', '2',
'--label', 'col20'])
if result.exit_code != 0:
raise result.exception
self.assertEqual('', result.output)
self.assertTrue(os.path.exists('qbc.pb'))
reader = acton.proto.io.read_protos(
'qbc.pb', acton.proto.acton_pb2.Predictions)
protos = list(reader)
self.assertEqual(
2, len(protos),
msg='Expected 2 protobufs; found {}'.format(len(protos)))
class TestComponentCLI(unittest.TestCase):
"""Tests the CLI to the label/predict/recommend components."""
def setUp(self):
self.runner = CliRunner()
def test_label_args(self):
"""acton-label takes arguments and outputs a protobuf."""
db_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas.h5'))
with self.runner.isolated_filesystem():
result = self.runner.invoke(
acton.cli.label,
['--data', db_path,
'--label', 'col20',
'--feature', 'col10',
'--feature', 'col11',
'--pandas-key', 'classification'],
input='1\n2\n3\n\n')
if result.exit_code != 0:
raise result.exception
output = result.output_bytes
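            # The CLI emits a length-prefixed protobuf: an 8-byte little-endian unsigned
            # length ('<Q') followed by the serialized message, unpacked below.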
length, = struct.unpack('<Q', output[:8])
proto = output[8:]
self.assertEqual(len(proto), length)
labels = acton.proto.wrappers.LabelPool.deserialise(proto)
self.assertEqual([1, 2, 3], labels.ids)
self.assertTrue(labels.proto.db.path.endswith('_pandas.h5'))
self.assertEqual({
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}, labels.db_kwargs)
def test_label_protobuf(self):
"""acton-label takes and outputs a protobuf."""
db_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas.h5'))
with self.runner.isolated_filesystem():
db_kwargs = {
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}
with acton.database.PandasReader(db_path, **db_kwargs) as db:
proto = acton.proto.wrappers.Recommendations.make(
labelled_ids=[1, 2, 3],
recommended_ids=[4],
recommender='UncertaintyRecommender',
db=db).proto.SerializeToString()
assert isinstance(proto, bytes)
length = struct.pack('<Q', len(proto))
result = self.runner.invoke(
acton.cli.label,
input=length + proto)
if result.exit_code != 0:
raise result.exception
output = result.output_bytes
length, = struct.unpack('<Q', output[:8])
proto = output[8:]
self.assertEqual(len(proto), length)
labels = acton.proto.wrappers.LabelPool.deserialise(proto)
self.assertEqual([1, 2, 3, 4], labels.ids)
self.assertTrue(labels.proto.db.path.endswith('_pandas.h5'))
self.assertEqual({
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}, labels.db_kwargs)
def test_predict(self):
"""acton-predict takes and outputs a protobuf."""
db_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas.h5'))
with self.runner.isolated_filesystem():
db_kwargs = {
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}
with acton.database.PandasReader(db_path, **db_kwargs) as db:
proto = acton.proto.wrappers.LabelPool.make(
ids=[1, 2, 3],
db=db)
proto = proto.proto.SerializeToString()
assert isinstance(proto, bytes)
length = struct.pack('<Q', len(proto))
result = self.runner.invoke(
acton.cli.predict,
input=length + proto)
if result.exit_code != 0:
raise result.exception
output = result.output_bytes
length, = struct.unpack('<Q', output[:8])
proto = output[8:]
self.assertEqual(len(proto), length)
predictions = acton.proto.wrappers.Predictions.deserialise(proto)
self.assertEqual([1, 2, 3], predictions.labelled_ids)
self.assertTrue(predictions.proto.db.path.endswith('_pandas.h5'))
output_db_kwargs = predictions.db_kwargs
del output_db_kwargs['label_encoder']
self.assertEqual({
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}, output_db_kwargs)
def test_recommend(self):
"""acton-recommend takes and outputs a protobuf."""
db_path = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'classification_pandas.h5'))
with self.runner.isolated_filesystem():
predictions = numpy.random.random(size=(1, 2, 1))
db_kwargs = {
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}
with acton.database.PandasReader(db_path, **db_kwargs) as db:
proto = acton.proto.wrappers.Predictions.make(
labelled_ids=[1, 2, 3],
predicted_ids=[4, 5],
predictions=predictions,
predictor='LogisticRegression',
db=db)
proto = proto.proto.SerializeToString()
assert isinstance(proto, bytes)
length = struct.pack('<Q', len(proto))
result = self.runner.invoke(
acton.cli.recommend,
input=length + proto)
if result.exit_code != 0:
raise result.exception
output = result.output_bytes
length, = struct.unpack('<Q', output[:8])
proto = output[8:]
self.assertEqual(len(proto), length)
recs = acton.proto.wrappers.Recommendations.deserialise(proto)
self.assertEqual([1, 2, 3], recs.labelled_ids)
self.assertTrue(recs.proto.db.path.endswith('_pandas.h5'))
self.assertEqual({
'feature_cols': ['col10', 'col11'],
'label_col': 'col20',
'key': 'classification',
'encode_labels': True,
}, recs.db_kwargs)
|
bsd-3-clause
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/matplotlib/dviread.py
|
11
|
33923
|
"""
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates
            # (descent is not yet scaled at this point, so report it in raw dvi units too)
            return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
                                   width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and chr(ch)
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:].decode('ascii'))
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:].decode('ascii'))
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
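        # tfm widths are fix_word values (fractions of the design size with 20 fractional bits);
        # e.g. a width of 1 << 19 (= 0.5 of the design size) maps to (1000 * (1 << 19)) >> 20 = 500.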
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
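    # e.g. _mul2012(1 << 20, s) == s (multiplying by 1.0), and _mul2012(1 << 19, s) == s >> 1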
return (num1*num2) >> 20
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
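# Illustrative sketch of the *.enc subset Encoding._parse accepts (hypothetical
# contents; real encodings such as 8r.enc are much longer):
#   /MyEncoding [
#     /grave /acute /circumflex
#   ] def
# would yield ['grave', 'acute', 'circumflex'].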
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
:option:`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
|
mit
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/markers.py
|
6
|
32364
|
"""
This module contains functions to handle markers. Used by both the
marker functionality of `~matplotlib.axes.Axes.plot` and
`~matplotlib.axes.Axes.scatter`.
All possible markers are defined here:
============================== ===============================================
marker description
============================== ===============================================
`"."` point
`","` pixel
`"o"` circle
`"v"` triangle_down
`"^"` triangle_up
`"<"` triangle_left
`">"` triangle_right
`"1"` tri_down
`"2"` tri_up
`"3"` tri_left
`"4"` tri_right
`"8"` octagon
`"s"` square
`"p"` pentagon
`"P"` plus (filled)
`"*"` star
`"h"` hexagon1
`"H"` hexagon2
`"+"` plus
`"x"` x
`"X"` x (filled)
`"D"` diamond
`"d"` thin_diamond
`"|"` vline
`"_"` hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft (centered at tip)
CARETRIGHT caretright (centered at tip)
CARETUP caretup (centered at tip)
CARETDOWN caretdown (centered at tip)
CARETLEFTBASE caretleft (centered at base)
CARETRIGHTBASE caretright (centered at base)
CARETUPBASE caretup (centered at base)
`"None"`, `" "` or `""` nothing
``'$...$'`` render the string using mathtext.
`verts` a list of (x, y) pairs used for Path vertices.
The center of the marker is located at (0,0) and
the size is normalized.
path a `~matplotlib.path.Path` instance.
(`numsides`, `style`, `angle`) The marker can also be a tuple (`numsides`,
`style`, `angle`), which will create a custom,
regular symbol.
`numsides`:
the number of sides
`style`:
the style of the regular symbol:
0
a regular polygon
1
a star-like symbol
2
an asterisk
3
                                                    a circle (`numsides` and `angle` are
                                                    ignored)
`angle`:
the angle of rotation of the symbol
============================== ===============================================
For backward compatibility, the form (`verts`, 0) is also accepted,
but it is equivalent to just `verts` for giving a raw set of vertices
that define the shape.
`None` is the default, which means 'nothing'; however, this table is referred
to from other docs for the valid marker inputs, and in those cases `None`
still means 'default'.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from .cbook import is_math_text, is_string_like, is_numlike, iterable
from matplotlib import rcParams
from .path import Path
from .transforms import IdentityTransform, Affine2D
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE) = list(xrange(12))
class MarkerStyle(object):
markers = {
'.': 'point',
',': 'pixel',
'o': 'circle',
'v': 'triangle_down',
'^': 'triangle_up',
'<': 'triangle_left',
'>': 'triangle_right',
'1': 'tri_down',
'2': 'tri_up',
'3': 'tri_left',
'4': 'tri_right',
'8': 'octagon',
's': 'square',
'p': 'pentagon',
'*': 'star',
'h': 'hexagon1',
'H': 'hexagon2',
'+': 'plus',
'x': 'x',
'D': 'diamond',
'd': 'thin_diamond',
'|': 'vline',
'_': 'hline',
'P': 'plus_filled',
'X': 'x_filled',
TICKLEFT: 'tickleft',
TICKRIGHT: 'tickright',
TICKUP: 'tickup',
TICKDOWN: 'tickdown',
CARETLEFT: 'caretleft',
CARETRIGHT: 'caretright',
CARETUP: 'caretup',
CARETDOWN: 'caretdown',
CARETLEFTBASE: 'caretleftbase',
CARETRIGHTBASE: 'caretrightbase',
CARETUPBASE: 'caretupbase',
CARETDOWNBASE: 'caretdownbase',
"None": 'nothing',
None: 'nothing',
' ': 'nothing',
'': 'nothing'
}
# Just used for informational purposes. is_filled()
# is calculated in the _set_* functions.
filled_markers = (
'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd',
'P', 'X')
fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
_half_fillstyles = ('left', 'right', 'bottom', 'top')
# TODO: Is this ever used as a non-constant?
_point_size_reduction = 0.5
def __init__(self, marker=None, fillstyle=None):
"""
MarkerStyle
Attributes
----------
        markers : list of known markers
fillstyles : list of known fillstyles
filled_markers : list of known filled markers.
Parameters
----------
marker : string or array_like, optional, default: None
See the descriptions of possible markers in the module docstring.
fillstyle : string, optional, default: 'full'
'full', 'left", 'right', 'bottom', 'top', 'none'
"""
# The fillstyle has to be set here as it might be accessed by calls to
# _recache() in set_marker.
self._fillstyle = fillstyle
self.set_marker(marker)
self.set_fillstyle(fillstyle)
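    # Illustrative queries on a style object (hypothetical values):
    #   m = MarkerStyle('o', fillstyle='top')
    #   m.is_filled()                  # -> True
    #   m.get_path(), m.get_alt_path() # top half and bottom half of the circle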
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_marker_function')
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
self.set_marker(self._marker)
self._recache()
def _recache(self):
self._path = Path(np.empty((0, 2)))
self._transform = IdentityTransform()
self._alt_path = None
self._alt_transform = None
self._snap_threshold = None
self._joinstyle = 'round'
self._capstyle = 'butt'
self._filled = True
self._marker_function()
if six.PY3:
def __bool__(self):
return bool(len(self._path.vertices))
else:
def __nonzero__(self):
return bool(len(self._path.vertices))
def is_filled(self):
return self._filled
def get_fillstyle(self):
return self._fillstyle
def set_fillstyle(self, fillstyle):
"""
Sets fillstyle
Parameters
----------
fillstyle : string amongst known fillstyles
"""
if fillstyle is None:
fillstyle = rcParams['markers.fillstyle']
if fillstyle not in self.fillstyles:
raise ValueError("Unrecognized fillstyle %s"
% ' '.join(self.fillstyles))
self._fillstyle = fillstyle
self._recache()
def get_joinstyle(self):
return self._joinstyle
def get_capstyle(self):
return self._capstyle
def get_marker(self):
return self._marker
def set_marker(self, marker):
if (iterable(marker) and len(marker) in (2, 3) and
marker[1] in (0, 1, 2, 3)):
self._marker_function = self._set_tuple_marker
elif isinstance(marker, np.ndarray):
self._marker_function = self._set_vertices
elif not isinstance(marker, list) and marker in self.markers:
self._marker_function = getattr(
self, '_set_' + self.markers[marker])
elif is_string_like(marker) and is_math_text(marker):
self._marker_function = self._set_mathtext_path
elif isinstance(marker, Path):
self._marker_function = self._set_path_marker
else:
try:
Path(marker)
self._marker_function = self._set_vertices
except ValueError:
raise ValueError('Unrecognized marker style'
' {0}'.format(marker))
self._marker = marker
self._recache()
def get_path(self):
return self._path
def get_transform(self):
return self._transform.frozen()
def get_alt_path(self):
return self._alt_path
def get_alt_transform(self):
return self._alt_transform.frozen()
def get_snap_threshold(self):
return self._snap_threshold
def _set_nothing(self):
self._filled = False
def _set_custom_marker(self, path):
verts = path.vertices
rescale = max(np.max(np.abs(verts[:, 0])),
np.max(np.abs(verts[:, 1])))
self._transform = Affine2D().scale(0.5 / rescale)
self._path = path
def _set_path_marker(self):
self._set_custom_marker(self._marker)
def _set_vertices(self):
verts = self._marker
marker = Path(verts)
self._set_custom_marker(marker)
def _set_tuple_marker(self):
marker = self._marker
if is_numlike(marker[0]):
if len(marker) == 2:
numsides, rotation = marker[0], 0.0
elif len(marker) == 3:
numsides, rotation = marker[0], marker[2]
symstyle = marker[1]
if symstyle == 0:
self._path = Path.unit_regular_polygon(numsides)
self._joinstyle = 'miter'
elif symstyle == 1:
self._path = Path.unit_regular_star(numsides)
self._joinstyle = 'bevel'
elif symstyle == 2:
self._path = Path.unit_regular_asterisk(numsides)
self._filled = False
self._joinstyle = 'bevel'
elif symstyle == 3:
self._path = Path.unit_circle()
self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
else:
verts = np.asarray(marker[0])
path = Path(verts)
self._set_custom_marker(path)
def _set_mathtext_path(self):
"""
Draws mathtext markers '$...$' using TextPath object.
Submitted by tcb
"""
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties
# again, the properties could be initialised just once outside
# this function
# Font size is irrelevant here, it will be rescaled based on
# the drawn size later
props = FontProperties(size=1.0)
text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,
usetex=rcParams['text.usetex'])
if len(text.vertices) == 0:
return
xmin, ymin = text.vertices.min(axis=0)
xmax, ymax = text.vertices.max(axis=0)
width = xmax - xmin
height = ymax - ymin
max_dim = max(width, height)
self._transform = Affine2D() \
.translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \
.scale(1.0 / max_dim)
self._path = text
self._snap = False
def _half_fill(self):
fs = self.get_fillstyle()
result = fs in self._half_fillstyles
return result
def _set_circle(self, reduction=1.0):
self._transform = Affine2D().scale(0.5 * reduction)
self._snap_threshold = np.inf
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_circle()
else:
# build a right-half circle
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._path = self._alt_path = Path.unit_circle_righthalf()
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform.frozen().rotate_deg(180.)
def _set_pixel(self):
self._path = Path.unit_rectangle()
# Ideally, you'd want -0.5, -0.5 here, but then the snapping
# algorithm in the Agg backend will round this to a 2x2
# rectangle from (-1, -1) to (1, 1). By offsetting it
# slightly, we can force it to be (0, 0) to (1, 1), which both
# makes it only be a single pixel and places it correctly
# aligned to 1-width stroking (i.e. the ticks). This hack is
# the best of a number of bad alternatives, mainly because the
# backends are not aware of what marker is actually being used
# beyond just its path data.
self._transform = Affine2D().translate(-0.49999, -0.49999)
self._snap_threshold = None
def _set_point(self):
self._set_circle(reduction=self._point_size_reduction)
_triangle_path = Path(
[[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
    # Going down halfway looks too small. Golden ratio is too far.
_triangle_path_u = Path(
[[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_d = Path(
[[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],
[-3 / 5., -1 / 5.]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_l = Path(
[[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_r = Path(
[[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
def _set_triangle(self, rot, skip):
self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._triangle_path
else:
mpaths = [self._triangle_path_u,
self._triangle_path_l,
self._triangle_path_d,
self._triangle_path_r]
if fs == 'top':
self._path = mpaths[(0 + skip) % 4]
self._alt_path = mpaths[(2 + skip) % 4]
elif fs == 'bottom':
self._path = mpaths[(2 + skip) % 4]
self._alt_path = mpaths[(0 + skip) % 4]
elif fs == 'left':
self._path = mpaths[(1 + skip) % 4]
self._alt_path = mpaths[(3 + skip) % 4]
else:
self._path = mpaths[(3 + skip) % 4]
self._alt_path = mpaths[(1 + skip) % 4]
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_triangle_up(self):
return self._set_triangle(0.0, 0)
def _set_triangle_down(self):
return self._set_triangle(180.0, 2)
def _set_triangle_left(self):
return self._set_triangle(90.0, 3)
def _set_triangle_right(self):
return self._set_triangle(270.0, 1)
def _set_square(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 2.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
# build a bottom filled square out of two rectangles, one
# filled. Use the rotation to support left, right, bottom
# or top
if fs == 'bottom':
rotate = 0.
elif fs == 'top':
rotate = 180.
elif fs == 'left':
rotate = 270.
else:
rotate = 90.
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
[0.0, 0.5], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
[0.0, 1.0], [0.0, 0.5]])
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_diamond(self):
self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],
[1.0, 1.0], [0.0, 0.0]])
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_thin_diamond(self):
self._set_diamond()
self._transform.scale(0.6, 1.0)
def _set_pentagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
polypath = Path.unit_regular_polygon(5)
fs = self.get_fillstyle()
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
y = (1 + np.sqrt(5)) / 4.
top = Path([verts[0], verts[1], verts[4], verts[0]])
bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])
left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_star(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_star(5, innerCircle=0.381966)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))
bottom = Path(np.vstack((verts[3:8, :], verts[3])))
left = Path(np.vstack((verts[0:6, :], verts[0])))
right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'bevel'
def _set_hexagon1(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x = np.abs(np.cos(5 * np.pi / 6.))
top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))
bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))
left = Path(verts[(0, 1, 2, 3), :])
right = Path(verts[(0, 5, 4, 3), :])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_hexagon2(self):
self._transform = Affine2D().scale(0.5).rotate_deg(30)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x, y = np.sqrt(3) / 4, 3 / 4.
top = Path(verts[(1, 0, 5, 4, 1), :])
bottom = Path(verts[(1, 2, 3, 4), :])
left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],
[-x, -y], [x, y])))
right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_octagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(8)
if not self._half_fill():
self._transform.rotate_deg(22.5)
self._path = polypath
else:
x = np.sqrt(2.) / 4.
half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],
[-1, -x], [-x, -1], [0, -1]])
if fs == 'bottom':
rotate = 90.
elif fs == 'top':
rotate = 270.
elif fs == 'right':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._path = self._alt_path = half
self._alt_transform = self._transform.frozen().rotate_deg(180.0)
self._joinstyle = 'miter'
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _set_vline(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
def _set_hline(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _set_tickleft(self):
self._transform = Affine2D().scale(-1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
def _set_tickright(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _set_tickup(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
def _set_tickdown(self):
self._transform = Affine2D().scale(1.0, -1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_plus(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._plus_path
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_tri_down(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_up(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_left(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_right(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _set_caretdown(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretup(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretleft(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretright(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
_caret_path_base = Path([[-1.0, 0.0], [0.0, -1.5], [1.0, 0]])
def _set_caretdownbase(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path_base
self._joinstyle = 'miter'
def _set_caretupbase(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path_base
self._joinstyle = 'miter'
def _set_caretleftbase(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path_base
self._joinstyle = 'miter'
def _set_caretrightbase(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path_base
self._joinstyle = 'miter'
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_x(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._x_path
_plus_filled_path = Path([(1/3, 0), (2/3, 0), (2/3, 1/3),
(1, 1/3), (1, 2/3), (2/3, 2/3),
(2/3, 1), (1/3, 1), (1/3, 2/3),
(0, 2/3), (0, 1/3), (1/3, 1/3),
(1/3, 0)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
_plus_filled_path_t = Path([(1, 1/2), (1, 2/3), (2/3, 2/3),
(2/3, 1), (1/3, 1), (1/3, 2/3),
(0, 2/3), (0, 1/2), (1, 1/2)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
def _set_plus_filled(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 5.0
self._joinstyle = 'miter'
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._plus_filled_path
else:
# Rotate top half path to support all partitions
if fs == 'top':
rotate, rotate_alt = 0, 180
elif fs == 'bottom':
rotate, rotate_alt = 180, 0
elif fs == 'left':
rotate, rotate_alt = 90, 270
else:
rotate, rotate_alt = 270, 90
self._path = self._plus_filled_path_t
self._alt_path = self._plus_filled_path_t
self._alt_transform = Affine2D().translate(-0.5, -0.5)
self._transform.rotate_deg(rotate)
self._alt_transform.rotate_deg(rotate_alt)
_x_filled_path = Path([(0.25, 0), (0.5, 0.25), (0.75, 0), (1, 0.25),
(0.75, 0.5), (1, 0.75), (0.75, 1), (0.5, 0.75),
(0.25, 1), (0, 0.75), (0.25, 0.5), (0, 0.25),
(0.25, 0)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
_x_filled_path_t = Path([(0.75, 0.5), (1, 0.75), (0.75, 1),
(0.5, 0.75), (0.25, 1), (0, 0.75),
(0.25, 0.5), (0.75, 0.5)],
[Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY])
def _set_x_filled(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 5.0
self._joinstyle = 'miter'
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._x_filled_path
else:
# Rotate top half path to support all partitions
if fs == 'top':
rotate, rotate_alt = 0, 180
elif fs == 'bottom':
rotate, rotate_alt = 180, 0
elif fs == 'left':
rotate, rotate_alt = 90, 270
else:
rotate, rotate_alt = 270, 90
self._path = self._x_filled_path_t
self._alt_path = self._x_filled_path_t
self._alt_transform = Affine2D().translate(-0.5, -0.5)
self._transform.rotate_deg(rotate)
self._alt_transform.rotate_deg(rotate_alt)
|
gpl-3.0
|
daphne-yu/aubio
|
python/demos/demo_waveform_plot.py
|
4
|
2074
|
#! /usr/bin/env python
import sys
from aubio import pvoc, source
from numpy import zeros, hstack
def get_waveform_plot(filename, samplerate = 0, block_size = 4096, ax = None):
import matplotlib.pyplot as plt
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
hop_s = block_size
allsamples_max = zeros(0,)
downsample = 2**4 # to plot n samples / hop_s
a = source(filename, samplerate, hop_s) # source file
if samplerate == 0: samplerate = a.samplerate
total_frames = 0
while True:
samples, read = a()
# keep some data to plot it later
new_maxes = (abs(samples.reshape(hop_s/downsample, downsample))).max(axis=0)
allsamples_max = hstack([allsamples_max, new_maxes])
total_frames += read
if read < hop_s: break
allsamples_max = (allsamples_max > 0) * allsamples_max
allsamples_max_times = [ ( float (t) / downsample ) * hop_s for t in range(len(allsamples_max)) ]
ax.plot(allsamples_max_times, allsamples_max, '-b')
ax.plot(allsamples_max_times, -allsamples_max, '-b')
ax.axis(xmin = allsamples_max_times[0], xmax = allsamples_max_times[-1])
set_xlabels_sample2time(ax, allsamples_max_times[-1], samplerate)
return ax
def set_xlabels_sample2time(ax, latest_sample, samplerate):
ax.axis(xmin = 0, xmax = latest_sample)
if latest_sample / float(samplerate) > 60:
ax.set_xlabel('time (mm:ss)')
ax.set_xticklabels([ "%02d:%02d" % (t/float(samplerate)/60, (t/float(samplerate))%60) for t in ax.get_xticks()[:-1]], rotation = 50)
else:
ax.set_xlabel('time (ss.mm)')
ax.set_xticklabels([ "%02d.%02d" % (t/float(samplerate), 100*((t/float(samplerate))%1) ) for t in ax.get_xticks()[:-1]], rotation = 50)
if __name__ == '__main__':
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print "Usage: %s <filename>" % sys.argv[0]
else:
for soundfile in sys.argv[1:]:
get_waveform_plot(soundfile)
# display graph
plt.show()
|
gpl-3.0
|
mdeff/ntds_2017
|
projects/reports/graphlang/graph.py
|
1
|
7730
|
"""
Various methods related to graph construction and analysis
"""
from preprocess import *
import numpy as np
import networkx as nx
import community
from matplotlib import pyplot as plt
def build_link_exp_decay(adj, weight, words_map, words, from_index, to_index, max_dist, stopwords, links_to_stopwords=True, self_links=False):
"""
Builds a link from a given word in the graph to another word at a defined index.
The farther the other word, the smaller the edge between them.
In this variant, the weight of the edges decays exponentially with the distance
"""
words_len = len(words)
links_made = 0
while to_index < words_len and (words[to_index] in string.punctuation or (not links_to_stopwords and words[to_index] in stopwords) or (not self_links and words[to_index] == words[from_index])):
to_index += 1
weight /= 2
if (to_index - from_index) <= max_dist and to_index < len(words):
links_made = 1
adj[words_map[words[from_index]], words_map[words[to_index]]] = adj[words_map[words[from_index]], words_map[words[to_index]]] + weight
weight /= 2
return weight, to_index + 1, links_made
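# Sketch of the decay above: with an initial weight w, every skipped
# punctuation/stopword token halves w before the edge is stored, and w is
# halved again once the edge is made, so successive linked neighbours receive
# roughly w, w/2, w/4, ...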
def build_link(adj, weight, words_map, words, from_index, to_index, max_dist, stopwords, links_to_stopwords=True, self_links=False):
"""
Builds a link from a given word in the graph to another word at a defined index.
The farther the other word, the smaller the edge between them.
"""
links_made = 0
    if(weight <= 0):
        return weight, to_index + 1, links_made
words_len = len(words)
while to_index < words_len and (words[to_index] in string.punctuation or (not links_to_stopwords and words[to_index] in stopwords) or (not self_links and words[to_index] == words[from_index])):
to_index += 1
weight -= 1
if (to_index - from_index) <= max_dist and to_index < len(words):
links_made = 1
adj[words_map[words[from_index]], words_map[words[to_index]]] = adj[words_map[words[from_index]], words_map[words[to_index]]] + weight
weight -= 1
return weight, to_index + 1, links_made
def build_graph(lemmas, lemmas_map, max_dist=20, nlinks=4, max_weight=16, lang=None, links_from_stopwords=True, links_to_stopwords=True, self_links=False):
len_dist_lemmas = len(lemmas_map)
len_lemmas = len(lemmas)
adj = np.zeros((len_dist_lemmas, len_dist_lemmas))
    stopwords = []
    if(lang != None and (not links_from_stopwords or not links_to_stopwords)):
        stopwords = nltk.corpus.stopwords.words(lang)
for index, lemma in enumerate(lemmas):
if lemma in string.punctuation or (not links_from_stopwords and lemma in stopwords):
continue
weight = max_dist
next_index = index + 1
total_links_made = 0
for i in range(0, max_dist):
weight, next_index, links_made = build_link(adj, weight, lemmas_map, lemmas, index, next_index, max_dist, stopwords, links_to_stopwords, self_links)
total_links_made += links_made
if(total_links_made >= nlinks or weight <= 0):
break
return adj
def text_to_graph(text, undirected=False, subsample=1., normalization="lem", lang="english", words_lower=True, no_punct_nodes=True, nlinks=4, max_dist=20, max_weight=16, ignore_punct=True, ignore_stopwords=False, links_from_stopwords=True, links_to_stopwords=True, self_links=False, return_words_map=False):
if(ignore_stopwords):
links_from_stopwords = False
links_to_stopwords = False
if normalization == "lem":
words = words_lems(text, lower=words_lower, ignore_punct=ignore_punct)
elif normalization == "stem":
words = words_stems(text, lang=lang, lower=words_lower)
if(subsample < 1. and subsample > 0):
sub_len = subsample * len(words)
words = words[:int(sub_len)]
words_map = words_to_int(words, lang=lang, ignore_punct=no_punct_nodes, ignore_stopwords=ignore_stopwords)
    graph = build_graph(words, words_map, lang=lang, max_dist=max_dist, nlinks=nlinks, max_weight=max_weight, links_from_stopwords=links_from_stopwords, links_to_stopwords=links_to_stopwords, self_links=self_links)
if(undirected):
graph += graph.T
if(return_words_map):
return (graph, words_map)
else:
return graph
def get_n_closest_words(graph, word_map, word, n_words=10):
index = word_map[word]
word_map_inversed = {i[1]:i[0] for i in word_map.items()}
return [word_map_inversed[np.argsort(graph[index])[::-1][i]] for i in range(n_words)]
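# Illustrative usage (hypothetical text; helpers such as words_lems and
# words_to_int come from preprocess):
#   adj, words_map = text_to_graph(some_text, undirected=True, return_words_map=True)
#   get_n_closest_words(adj, words_map, "graph", n_words=5)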
def sparsity(m):
return 1 - np.count_nonzero(m) / m.size
def np_to_nx(M, words_map=None):
G = nx.from_numpy_matrix(M)
if(words_map != None):
words_map_inv = {e[1]:e[0] for e in words_map.items()}
for n in G:
G.nodes[n]["word"] = words_map_inv[n]
return G
def compute_betweenness(G, weight="weight"):
betweenness = nx.betweenness_centrality(G, weight=weight)
for n in G:
G.nodes[n]["betweenness"] = betweenness[n]
return betweenness
def scale_betweenness(betweenness, min_=10, max_=120):
"""
Scales the values of the betweenness dictionary to a certain range of values
"""
max_el = max(betweenness.items(), key=lambda el: el[1])[1]
mult = max_ / (max_el + min_)
betweenness_scaled = {k: mult*v + min_ for k,v in betweenness.items()}
return betweenness_scaled
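# Worked example of the scaling above: for betweenness {0: 0.0, 1: 0.5} and the
# defaults min_=10, max_=120, mult = 120 / (0.5 + 10) ~ 11.43, so the scaled
# values are roughly {0: 10.0, 1: 15.7}.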
def community_partition(G, weight="weight"):
if(weight == "betweenness" and G.nodes()[0].get("betweenness") == None):
compute_betweenness(G)
return community.best_partition(G, weight=weight)
def communities(G, draw=True, cmap=None, pos=None, partition=None, betweenness_scaled=None):
"""
Computes the communities using the Louvain heuristics
"""
if(partition == None):
partition = community_partition(G, weight="betweenness")
if(betweenness_scaled == None):
if(G.nodes()[0].get("betweenness") == None):
betweenness = compute_betweenness(G, "betweenness")
else:
betweenness = nx.get_node_attributes(G, "betweenness")
betweenness_scaled = scale_betweenness(betweenness)
if(pos == None):
pos = nx.spring_layout(G)
if(draw and cmap):
count = 0.
for com in set(partition.values()):
count = count + 1.
list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com]
sizes = [betweenness_scaled[node] for node in list_nodes]
nx.draw_networkx_nodes(G, pos, list_nodes, node_size=sizes, node_color=cmap[com])
nx.draw_networkx_edges(G, pos, alpha=0.05)
return pos, partition, betweenness_scaled
def induced_graph(original_graph, partition, induced_graph=None, rescale_node_size=1., draw=True, cmap=None, words_map_inv=None, pos=None, betweenness_scaled=None):
"""
Returns the graph induced from the community partition of the graph
"""
if(induced_graph == None):
induced_graph = community.induced_graph(partition, original_graph, weight="weight")
if(draw and cmap):
if(pos == None):
pos = nx.spring_layout(induced_graph)
w = induced_graph.degree(weight="weight")
sizes = [w[node]*rescale_node_size for node in induced_graph.nodes()]
nx.draw(induced_graph, pos=pos, node_size=sizes, node_color=[cmap[n] for n in induced_graph.nodes()])
labels = {}
for com in induced_graph.nodes():
rep = max([nodes for nodes in partition.keys() if partition[nodes] == com], key=lambda n: original_graph.degree(n, weight="weight"))
labels[com] = words_map_inv[rep]
nx.draw_networkx_labels(induced_graph, pos, labels, font_size=16)
return induced_graph
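# Illustrative end-to-end sketch (hypothetical variables, drawing disabled):
#   adj, words_map = text_to_graph(some_text, undirected=True, return_words_map=True)
#   G = np_to_nx(adj, words_map)
#   pos, partition, bet_scaled = communities(G, draw=False)
#   induced = induced_graph(G, partition, draw=False)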
|
mit
|
Myasuka/scikit-learn
|
sklearn/metrics/ranking.py
|
79
|
25426
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
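# Illustrative trace (consistent with the roc_curve doctest below): for
# y_true = [1, 1, 2, 2], y_score = [0.1, 0.4, 0.35, 0.8] and pos_label=2, the
# scores sorted in decreasing order are 0.8, 0.4, 0.35, 0.1, giving
# fps = [0, 1, 1, 2], tps = [1, 1, 2, 2], thresholds = [0.8, 0.4, 0.35, 0.1].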
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
    is to give a better rank to the labels associated with each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formated array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
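# --- Illustrative sketch, not from the original module ---
# Hand computation of LRAP for the docstring example, mirroring the loop
# above: for each relevant label, its score rank among the relevant labels
# is divided by its score rank among all labels. Assumes numpy and scipy
# are importable; runs only when this module is executed directly.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import rankdata as _rankdata

    y_true_demo = np.array([[1, 0, 0], [0, 0, 1]])
    y_score_demo = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    total = 0.0
    for truth, scores in zip(y_true_demo, y_score_demo):
        relevant = np.flatnonzero(truth)
        # Rank by decreasing score (hence the negation); ties take the max rank.
        rank_all = _rankdata(-scores, 'max')[relevant]
        rank_rel = _rankdata(-scores[relevant], 'max')
        total += np.mean(rank_rel / rank_all.astype(float))
    print(total / len(y_true_demo))   # 0.4166..., matching the doctest above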
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
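# --- Illustrative sketch, not from the original module ---
# Worked coverage_error example: for each sample, count how many labels score
# at least as high as the lowest-scored true label, then average.
# Assumes scikit-learn and numpy are importable; values are hand-checked.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics import coverage_error

    y_true_demo = np.array([[1, 0, 0], [0, 0, 1]])
    y_score_demo = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # Sample 1: lowest true score is 0.75, covered by {1.0, 0.75} -> 2 labels.
    # Sample 2: lowest true score is 0.1, covered by all 3 scores -> 3 labels.
    print(coverage_error(y_true_demo, y_score_demo))   # (2 + 3) / 2 = 2.5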
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # If the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
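# --- Illustrative sketch, not from the original module ---
# Worked label_ranking_loss example: count (true, false) label pairs where the
# false label scores at least as high as the true one, normalise by the number
# of such pairs, then average over samples. Assumes scikit-learn is importable;
# values are hand-checked.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics import label_ranking_loss

    y_true_demo = np.array([[1, 0, 0], [0, 0, 1]])
    y_score_demo = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # Sample 1: the true label (0.75) is outranked by one of two false labels -> 1/2.
    # Sample 2: the true label (0.1) is outranked by both false labels         -> 2/2.
    print(label_ranking_loss(y_true_demo, y_score_demo))   # (0.5 + 1.0) / 2 = 0.75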
|
bsd-3-clause
|
neuropoly/spinalcordtoolbox
|
spinalcordtoolbox/scripts/sct_compute_ernst_angle.py
|
1
|
6091
|
#!/usr/bin/env python
#########################################################################################
#
# All sort of utilities for labels.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont
# Modified: 2015-02-17
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
# DEFAULT PARAMETERS
class Param:
# The constructor
def __init__(self):
self.debug = 0
self.verbose = 1
self.t1 = 0
class ErnstAngle:
# The constructor
def __init__(self, t1, tr=None, fname_output=None):
self.t1 = t1
self.tr = tr
self.fname_output = fname_output
# compute and return the Ernst Angle
def getErnstAngle(self, tr):
from numpy import arccos
from numpy import exp
from math import pi
angle_rad = arccos(exp(-tr / self.t1))
angle_deg = angle_rad * 180 / pi
return angle_deg
# draw the graph
def draw(self, tr_min, tr_max):
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from numpy import arange
step = (tr_max - tr_min) / 50
tr_range = arange(tr_min, tr_max, step)
theta_E = self.getErnstAngle(tr_range)
printv("\nDrawing", type='info')
plt.plot(tr_range, theta_E, linewidth=1.0)
plt.xlabel("TR (in $ms$)")
plt.ylabel("$\Theta_E$ (in degree)")
plt.ylim(min(theta_E), max(theta_E) + 2)
plt.title("Ernst Angle with T1=" + str(self.t1) + "ms")
plt.grid(True)
if self.tr is not None:
plt.plot(self.tr, self.getErnstAngle(self.tr), 'ro')
if self.fname_output is not None :
printv("\nSaving figure", type='info')
plt.savefig(self.fname_output, format='png')
plt.show()
def get_parser():
parser = SCTArgumentParser(
description='Function to compute the Ernst Angle. For examples of T1 values in the brain, see Wansapura et al. '
'NMR relaxation times in the human brain at 3.0 tesla. Journal of magnetic resonance imaging : '
'JMRI (1999) vol. 9 (4) pp. 531-8. \nT1 in WM: 832ms\nT1 in GM: 1331ms'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
"-tr",
type=float,
required=True,
help='Value of TR (in ms) to get the Ernst Angle. Example: 2000',
metavar=Metavar.float,
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
"-t1",
type=float,
help='T1 value (in ms). Example: 832.3',
required=False,
metavar=Metavar.float,
default=832.0)
optional.add_argument(
"-b",
type=float,
nargs='*',
metavar=Metavar.float,
help='Min/Max range of TR (in ms) separated with space. Only use with -v 2. Example: 500 3500',
required=False)
optional.add_argument(
"-o",
help="Name of the output file containing Ernst angle result.",
required=False,
metavar=Metavar.str,
default="ernst_angle.txt")
optional.add_argument(
"-ofig",
help="Name of the output graph. Only use with -v 2.",
required=False,
metavar=Metavar.str,
default="ernst_angle.png")
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# main
#=======================================================================================================================
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
# Initialization
param = Param()
input_t1 = arguments.t1
input_fname_output = None
input_tr_min = 500
input_tr_max = 3500
input_tr = None
fname_output_file = arguments.o
if arguments.ofig is not None:
input_fname_output = arguments.ofig
if arguments.b is not None:
input_tr_min = arguments.b[0]
input_tr_max = arguments.b[1]
if arguments.tr is not None:
input_tr = arguments.tr
graph = ErnstAngle(input_t1, tr=input_tr, fname_output=input_fname_output)
if input_tr is not None:
printv("\nValue of the Ernst Angle with T1=" + str(graph.t1) + "ms and TR=" + str(input_tr) + "ms :", verbose=verbose, type='info')
printv(str(graph.getErnstAngle(input_tr)))
if input_tr > input_tr_max:
input_tr_max = input_tr + 500
elif input_tr < input_tr_min:
input_tr_min = input_tr - 500
# save text file
try:
f = open(fname_output_file, 'w')
f.write(str(graph.getErnstAngle(input_tr)))
f.close()
except:
printv('\nERROR: Cannot open file'+fname_output_file, '1', 'error')
if verbose == 2:
graph.draw(input_tr_min, input_tr_max)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
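# --- Illustrative sketch, not from the original script ---
# Standalone check of the formula used above, theta_E = arccos(exp(-TR/T1)),
# for the default white-matter T1 of 832 ms. Only numpy is assumed; the SCT
# wrappers are not needed. Note this only runs after main() returns.
if __name__ == "__main__":
    import numpy as np
    _t1, _tr = 832.0, 2000.0   # ms
    _theta = np.degrees(np.arccos(np.exp(-_tr / _t1)))
    print("Sanity check: Ernst angle for T1=%.0fms, TR=%.0fms is %.1f deg"
          % (_t1, _tr, _theta))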
|
mit
|
henrikalmer/genre-recognition
|
knn.py
|
1
|
1181
|
import json
import numpy as np
import pylab as pl
from sklearn import neighbors
from data import load_features, load_mfccs
# Load and divide data
##songs, prop_dict = load_mfccs()
songs, prop_dict = load_features()
training_set = dict([e for e in prop_dict.items()[0:30]])
testing_set = dict([e for e in prop_dict.items()[30:]])
# Train k-NN
fo = open('data/drums.genres.json', 'r')
genres = json.loads(fo.read())
cmap = {'pop': 0, 'rock': 1, 'reggae': 2, 'jazz': 3, 'classical': 4}
classes = [cmap[genres[k]] for k in training_set.keys()]
fo.close()
X = np.array([p for k, p in training_set.items()])
Y = np.array(classes)
n_neighbors = 9
clf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance').fit(X, Y)
# Test k-NN
validate = np.array([p for k, p in testing_set.items()])
hits = 0.0
misses = 0.0
for s, p in [(s, p) for (s, p) in testing_set.items()]:
s_name = s.split('/')[2].split('.')[0]
prediction = clf.predict(prop_dict[s])
answer = cmap[genres[s]]
if answer == prediction:
hits += 1
else:
misses += 1
print s_name, ': ', answer, prediction, answer == prediction
print 'Success rate: ', hits/(hits+misses)
|
gpl-2.0
|
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/sklearn/cluster/birch.py
|
207
|
22706
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
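# --- Illustrative sketch, not from the original module ---
# Quick check of the helper above: iterating a CSR matrix this way yields the
# same dense rows as X.toarray(), without building a sparse object per row.
# Reuses the module-level numpy/scipy imports; runs only when executed directly.
if __name__ == "__main__":
    _X_demo = sparse.csr_matrix(np.array([[0., 2., 0.], [1., 0., 3.]]))
    for _row, _expected in zip(_iterate_sparse_X(_X_demo), _X_demo.toarray()):
        assert np.array_equal(_row, _expected)
    print("_iterate_sparse_X reproduces X.toarray() row by row")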
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        Manipulated throughout rather than ``centroids_``, since
        ``centroids_`` is just a view of ``init_centroids_``.
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        Manipulated throughout, similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids_ and squared_norm_ as views, so that updating
        # init_centroids_ and init_sq_norm_ is sufficient to keep them
        # in sync.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # The child had to be split: redistribute the subclusters of
            # the child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
        Centroid of the subcluster. Prevents recomputing the centroid when
        ``CFNode.centroids_`` is called.
child_ : _CFNode
        Child node of the subcluster. Once a given _CFNode is set as the child
        of this subcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
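# --- Illustrative sketch, not from the original module ---
# Numerical check of the radius bookkeeping above: with n points, linear sum
# ls and squared sum ss, the squared radius reduces to ss / n - ||centroid||^2,
# i.e. the mean squared distance to the centroid. Uses the module-level numpy
# import; runs only when the module is executed directly.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _pts = _rng.rand(20, 3)
    _n, _ls, _ss = len(_pts), _pts.sum(axis=0), (_pts ** 2).sum()
    _centroid = _ls / _n
    _sq_radius = _ss / _n - np.dot(_centroid, _centroid)
    _direct = ((_pts - _centroid) ** 2).sum(axis=1).mean()
    assert np.allclose(_sq_radius, _direct)
    print("squared-radius identity holds: %.6f" % _sq_radius)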
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively until it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be smaller than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
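# --- Illustrative sketch, not from the original module ---
# Minimal partial_fit usage: build the CF tree from two batches, then predict.
# Mirrors the class docstring example; with n_clusters=None the leaf
# subclusters themselves act as clusters. Runs only when executed directly.
if __name__ == "__main__":
    _X_demo = np.array([[0, 1], [0.3, 1], [-0.3, 1],
                        [0, -1], [0.3, -1], [-0.3, -1]])
    _brc = Birch(threshold=0.5, branching_factor=50, n_clusters=None)
    _brc.partial_fit(_X_demo[:3])
    _brc.partial_fit(_X_demo[3:])
    print(_brc.predict(_X_demo))   # expected: two groups, e.g. [0 0 0 1 1 1]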
|
gpl-2.0
|
cwu2011/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
62
|
1274
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
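# Illustrative addition, not from the original example: for a hard-margin
# linear SVM the distance between the two dashed lines, measured perpendicular
# to the separating hyperplane, is 2 / ||w||.
margin = 2 / np.sqrt(np.sum(clf.coef_[0] ** 2))
print("margin width: %.3f" % margin)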
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
brandon-rhodes/numpy
|
numpy/core/function_base.py
|
16
|
6408
|
from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import array, result_type, NaN
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
|
bsd-3-clause
|
eickenberg/scikit-learn
|
sklearn/utils/setup.py
|
296
|
2884
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
avoyages/etfa2015
|
mtf_python3_v1/mtf/utils/dic.py
|
1
|
12317
|
#!/usr/bin/python3
import itertools
import pandas as pd
'''
#list of smart address for fuzzing --add value 32768 65535 and 10000, 20000,40000, 50000
Common_add_fuz=[0,1,2,3,4,254,255,256,257,511,512,513,1023,1024,1025,2047,2048,2049,4095,4096,4097,8195,8196,8197,16383,16384,16385,32762,32763,32764,32769,65531,65532,65533,65534,65535]
#Use to Invalid_quantity, smart value contiguous registers (1 to 123 registers) for 16 (0x10)/
qua_IN_REG_HO_REG=[0,1,2,3,64,123,124,125,126,127,511,512,513,1024,2047,2048,2049,4095,4096,4097,5000,8196,10000,32762,32763,32764,32769,65333,65534,65535] #Quantity 1 to 125 (0x7D)
'qua_COILS_DIS_IN':[0,1,2,3,64,123,124,125,126,127,511,512,513,1000,1998,1999,2000,2001,2002,2047,2048,2049,4095,4096,4097,5000,8196,10000,32762,32763,32764,32769,65333,65534,65535] #Registers 1 to 2000 (0x7D)
#23 (0x17) Read/Write Multiple registers/Quantity to Read=125/Quantity to Write =121
"qua_WR_MU_REG_RW_Multiple':[0,1,2,3,63,64,119,120,121,122,123,124,125,126,127,511,512,513,1024,2048,2049,4096,4097,5000,8196,10000,32762,32763,32764,32769,65333,65534,65535]
"qua_W_MUL_COILS' :[0,1,2,3,64,123,124,125,126,127,511,512,513,984,1966,1967,1968,1999,2000,2001,2002,2047,2048,4096,4097,5000,8196,10000,32762,32763,32764,32769,65333,65534,65535]
#Quantity to Write =121 /fuzz field value
'value_w_fc23': [0,1,2,64,119,120,122,123,121,122,123,124,125,512,513,1024,2048,2049,4096,4097,5000,8196,10000,32762,32763,32764,32769,65533,65534,65535]
'value_w_byte_count':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]
#FC 20 (0x14), FC 21, fc 23 set Configuration interest value for fuzzing field PDU
'value_test_refer_type':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 249, 250, 251, 252, 253, 254, 255] #Reference Type #Reference Type list
'value_test_Byte_count':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] # normal x07 to 0xF5 /7-245 /one BYTES
'value_test_file_number':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, 16385, 32767, 32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535]
'value_test_record_number':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 16383, 16384, 16385, 32767, 32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535]
'value_test_record_length':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, 16385, 32767, 32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535]
#ADU 1453 +7 = 1460 B MAX ,max packet 260B
'foo_len'= [0, 1,2,3,4,5,6,7,8,9,10,255,256,257,258,259,260,261,262,263,264,511,512,513,1024,2048,2049,1452,1451,1454,1455,1461,1462,1459,1458,2048,2049,4096,4097,5000,8196,10000,32762,32763,32764,32769,65534,65533,65535]
FC List
Public codes the non-contiguous ranges {1-64, 73-99, 111-127}.
User-defined codes in the ranges {65-72, 100-110}
flag_i=0
'foo_fct': [0,(7,8,9,11,12,17,43),list(range(65,73)),list(range(100,110)),list(range(111,128)),list(range(73,80)),list(range(1,65))]
#List for choosing the fuzzing field of the PDU for each FC
fp= [ 'repeat','random_pdu','remove','message']
f_mbap=['len' ,'clone','transId', 'protoId', 'unitId', ]
payload_pdu=['diagnostics','randByte','randBit','zerobyte','corrupt_bytes','corrupt_bits','little_endian_payload','sendbad', 'sendresponse','exception']
f_reg=['function_code', 'starting_address', 'quantity_of_x']
f_wr=['function_code', 'starting_address', 'output_value']
f_mul_coil_reg=['function_code', 'starting_address','quantity_of_x','byte_count','value']
f_read_file_rec=['function_code','Byte_Count','Reference_Type','File_number','Record_number','Record_length']
f_write_file_rec=['Data_length','Reference_Type','File_number','Record_number','Record_length','Record_data']
f_mask=['function_code', 'and_mask','or_mask']
f_rw_reg=['function_code', 'read_count','write_count','write_byte_count', 'value']
# dictionary of lists of smart address for fuzzing --add value 32768 65535 and 10000, 20000, 40000, 50000
#--Use to Invalid_quantity, smart value contiguous registers (1 to 123 registers) for 16 (0x10)/
#qua_IN_REG_HO_REG and qua_COILS_DIS_IN
#--#23 (0x17) Read/Write Multiple registers/Quantity to Read=125/Quantity to Write=121
#--FC 23 Quantity to Write =121 /fuzz field value and byte count
#FC 20 (0x14), FC 21, fc 23 set Configuration interest value for fuzzing field PDU
#Fuzzing len, ADU 1453 +7 = 1460 B MAX and max packet 260B
'''
Dict_lists_of_smart_value = {
'foo_value': [0,65535],
'Common_add_fuz':[0,1,2,3,4,254,255,256,257,511,512,513,1023,1024,1025,2047,2048,2049,4095,4096,4097,8195,8196,8197,16383,\
16384,16385,32762,32763,32764,32769,65531,65532,65533,65534,65535],
'qua_IN_REG_HO_REG':[0,1,2,3,64,123,124,125,126,127,511,512,513,1024,2047,2048,2049,4095,4096,4097,5000,8196,10000,32762,32763,\
32764,32769,65333,65534,65535],
'qua_COILS_DIS_IN':[0,1,2,3,64,123,124,125,126,127,511,512,513,1000,1998,1999,2000,2001,2002,2047,2048,2049,4095,4096,4097,\
5000,8196,10000,32762,32763,32764,32769,65333,65534,65535] ,
'qua_WR_MU_REG_RW_Multiple':[0,1,2,3,63,64,119,120,121,122,123,124,125,126,127,511,512,513,1024,2048,2049,4096,4097,5000,8196,\
10000,32762,32763,32764,32769,65333,65534,65535],
'qua_W_MUL_COILS':[0,1,2,3,64,123,124,125,126,127,511,512,513,984,1966,1967,1968,1999,2000,2001,2002,2047,2048,4096,4097,5000,\
8196,10000,32762,32763,32764,32769,65333,65534,65535],
'value_w_fc23': [0,1,2,64,119,120,122,123,121,122,123,124,125,512,513,1024,2048,2049,4096,4097,5000,8196,10000,32762,32763,32764,\
32769,65533,65534,65535],
'value_w_byte_count':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 246, 247, 248, 249, 250, 251, \
252, 253, 254, 255],
'value_test_refer_type':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 249, 250, 251, \
252, 253, 254, 255], #Reference Type list
'value_test_Byte_count':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 246, 247, 248, 249, 250, 251, 252, 253, 254, \
255],
'value_test_file_number':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, \
255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, 16385, 32767,\
32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535],
'value_test_record_number':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, \
1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 16383, 16384, 16385, 32767, 32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535],
'value_test_record_length':[0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 116, 117, 118, 119, 120, 121, 122, 123, 124, \
125, 126, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049, 4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, \
16385, 32767, 32768, 32769, 65471, 65472, 65473, 65503, 65504, 65505, 65519, 65520, 65521, 65527, 65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535],
'foo_len': [0, 1,2,3,4,5,6,7,8,9,10,255,256,257,258,259,260,261,262,263,264,511,512,513,1024,2048,2049,1452,1451,1454,1455,1461,\
1462,1459,1458,2048,2049,4096,4097,5000,8196,10000,32762,32763,32764,32769,65534,65533,65535],
}
Dict_fuzz_operation = {
'foo_value': [0,65535],
'foo_fct': [0,(7,8,9,11,12,17,43),list(range(65,73)),list(range(100,110)),list(range(111,128)),list(range(73,80)),list(range(1,65))],
'fp': ['repeat','random_pdu','remove','message'],
'f_mbap':['len' ,'clone','transId', 'protoId', 'unitId' ],
'payload_pdu':['diagnostics','randByte','randBit','zerobyte','corrupt_bytes','corrupt_bits','little_endian_payload','sendbad', 'sendresponse','exception'],
'f_reg':['function_code', 'starting_address', 'quantity_of_x'],
'f_wr':['function_code', 'starting_address', 'output_value'],
'f_mul_coil_reg':['function_code', 'starting_address','quantity_of_x','byte_count','value'],
'f_read_file_rec':['function_code','Byte_Count','Reference_Type','File_number','Record_number','Record_length'],
'f_write_file_rec':['Data_length','Reference_Type','File_number','Record_number','Record_length','Record_data'],
'f_mask':['function_code', 'and_mask','or_mask'],
'f_rw_reg':['function_code', 'read_count','write_count','write_byte_count', 'value'],
}
#df = pd.DataFrame(myDict)
#d
#print(myDict)
#print("The original dictionary is : " + str(myDict))
print(*Dict_fuzz_operation.items(), sep='\n')
foo_value=Dict_lists_of_smart_value.get('foo_value')
foo_fct=Dict_fuzz_operation.get('foo_fct')
#print (*foo_value)
#print(*Dict_lists_of_smart_value , sep = ", ")
#print('[',end='');print(foo_value, sep=', ', end='');print(']')
#print(*Dict_fuzz_operation , sep = ", ")
print (foo_fct)
#0, (7, 8, 9, 11, 12, 17, 43), [65, 66, 67, 68, 69, 70, 71, 72], [100, 101, 102, 103, 104, 105, 106, 107, 108, 109], \
#[111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127], [73, 74, 75, 76, 77, 78, 79], \
#[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64]]
print (foo_value)
# A list of the keys of dictionary
list_keys = [ k for k in Dict_fuzz_operation ]
# or a list of the values
list_values = [ v for v in Dict_fuzz_operation.values() ]
# or just a list of the list of key value pairs
list_key_value = [ [k,v] for k, v in Dict_fuzz_operation.items() ]
print(sep='\n')
print(list_key_value)
#df = pd.DataFrame(Dict_fuzz_operation)
"""Iterate over the dictionary using for loop
for key in wordFrequency:
value = wordFrequency[key]
print(key, " :: ", value)
out
qua_W_MUL_COILS : [0, 1, 2, 3, 64, 123, 124, 125, 126, 127, 511, 512, 513, 984, 1966, 1967, 1968, 1999, 2000, 2001, 2002, 2047, 2048, 4096, 4097, 5000, 8196, 10000, 32762, 32763, 32764, 32769, 65333, 65534, 65535]
"""
def iner_loop_key():
for key in Dict_lists_of_smart_value:
value = Dict_lists_of_smart_value[key]
#key = Dict_lists_of_smart_value[key] # for key and list value
#print(fuzz_session.key)
print(key, ":", value)
print(sep='\n')
prob_list = [('payload', 0.3), ('field_ADU', 0.1), ('field pdu', 0.4),('two fields in message', 0.1),('Not_fuzz',0.1)]
iner_loop_key()
print(prob_list)
|
gpl-3.0
|
superbobry/pymc3
|
pymc3/examples/lasso_block_update.py
|
14
|
1676
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Sometimes, it is very useful to update a set of parameters together. For example, variables that are highly correlated are often good to update together. In PyMC 3 block updating is simple, as this example will demonstrate.
#
# Here we have a LASSO regression model where the two coefficients are strongly correlated. Normally, we would define the coefficient parameters as a single random variable, but here we define them separately to show how to do block updates.
#
# First we generate some fake data.
# <codecell>
from matplotlib.pylab import *
from pymc3 import *
import numpy as np
d = np.random.normal(size=(3, 30))
d1 = d[0] + 4
d2 = d[1] + 4
yd = .2 * d1 + .3 * d2 + d[2]
# <markdowncell>
# Then define the random variables.
# <codecell>
with Model() as model:
s = Exponential('s', 1)
m1 = Laplace('m1', 0, 100)
m2 = Laplace('m2', 0, 100)
p = d1 * m1 + d2 * m2
y = Normal('y', p, s ** -2, observed=yd)
# <markdowncell>
# For most samplers, including Metropolis and HamiltonianMC, simply pass a
# list of variables to sample as a block. This works with both scalar and
# array parameters.
# <codecell>
with model:
step1 = Metropolis([m1, m2], blocked=True)
step2 = Metropolis([s], proposal_dist=LaplaceProposal)
def run(n=5000):
if n == "short":
n = 300
with model:
start = find_MAP()
trace = sample(n, [step1, step2], start)
dh = fn(hessian_diag(model.logpt))
# <codecell>
traceplot(trace)
# <codecell>
hexbin(trace[m1], trace[m2], gridsize=50)
# <codecell>
if __name__ == '__main__':
run()
|
apache-2.0
|
fabioticconi/scikit-learn
|
sklearn/utils/tests/test_utils.py
|
16
|
9120
|
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.linalg import eigh
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
    # Issue #6581: n_samples can exceed the input length when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
    v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
    slices = gen_even_slices(10, 3)
    joined_range = list(chain(*[some_range[s] for s in slices]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
bsd-3-clause
|
MartinDelzant/scikit-learn
|
sklearn/preprocessing/data.py
|
68
|
57385
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
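# Illustrative sketch (not part of the original module): a minimal usage
# example of the scale() helper defined above. The input values are an
# assumption chosen only for illustration; the asserts restate the
# documented behaviour (zero mean, unit variance per feature).
def _demo_scale():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    # Each column is centered to mean 0 and scaled to unit variance.
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    return X_scaled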
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
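# Illustrative sketch (not part of the original module): a small worked
# example of the MinMaxScaler transformation quoted in its docstring,
# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)), rescaled
# into feature_range. The data below is an assumption for illustration.
def _demo_min_max_scaler():
    X = np.array([[-1., 2.],
                  [-0.5, 6.],
                  [0., 10.],
                  [1., 18.]])
    X_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(X)
    # Per-feature minima map to 0 and per-feature maxima map to 1.
    assert np.allclose(X_scaled.min(axis=0), 0.)
    assert np.allclose(X_scaled.max(axis=0), 1.)
    return X_scaled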
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
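# Illustrative sketch (not part of the original module): StandardScaler
# fit/transform/inverse_transform round-trip on a tiny dense array, using
# the default with_mean/with_std. The data is an illustrative assumption.
def _demo_standard_scaler():
    X = np.array([[0., 0.],
                  [0., 0.],
                  [1., 1.],
                  [1., 1.]])
    scaler = StandardScaler().fit(X)
    X_scaled = scaler.transform(X)
    # mean_ and std_ are learned per feature; inverse_transform undoes
    # the centering and scaling exactly.
    assert np.allclose(scaler.mean_, [0.5, 0.5])
    assert np.allclose(scaler.inverse_transform(X_scaled), X)
    return X_scaled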
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
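# Illustrative sketch (not part of the original module): MaxAbsScaler on a
# small CSR matrix. Scaling by the per-column maximum absolute value keeps
# the sparsity pattern intact. The matrix is an illustrative assumption.
def _demo_max_abs_scaler():
    X = sparse.csr_matrix([[1., -2., 0.],
                           [2., 0., 0.],
                           [0., 1., -4.]])
    X_scaled = MaxAbsScaler().fit_transform(X)
    # Same number of stored non-zeros before and after scaling.
    assert X_scaled.nnz == X.nnz
    assert np.allclose(np.abs(X_scaled.toarray()).max(axis=0), 1.)
    return X_scaled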
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
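# Illustrative sketch (not part of the original module): because
# RobustScaler uses the median and the interquartile range, a single
# extreme outlier barely moves the learned statistics. The data is an
# assumption chosen only for illustration.
def _demo_robust_scaler():
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    scaler = RobustScaler().fit(X)
    # center_ is the per-feature median, scale_ the interquartile range;
    # neither is dragged away by the outlier at 100.
    assert np.allclose(scaler.center_, [3.])
    assert np.allclose(scaler.scale_, [2.])
    return scaler.transform(X)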
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
    powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
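# Illustrative sketch (not part of the original module): the powers_
# attribute documented above, for two input features and degree=2. Each
# row of powers_ gives the exponents of the inputs in one output column.
def _demo_polynomial_powers():
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    expected = np.array([[0, 0],   # bias column: x0**0 * x1**0
                         [1, 0],   # x0
                         [0, 1],   # x1
                         [2, 0],   # x0**2
                         [1, 1],   # x0 * x1
                         [0, 2]])  # x1**2
    assert np.array_equal(poly.powers_, expected)
    return poly.powers_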
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
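# Illustrative sketch (not part of the original module): the three norms
# supported by normalize() on a single row. The numbers are assumptions
# chosen so the norms are easy to check by hand (l1: 3 + 4 = 7, l2: 5,
# max: 4).
def _demo_normalize():
    X = np.array([[3., 4.]])
    assert np.allclose(normalize(X, norm='l1'), [[3. / 7., 4. / 7.]])
    assert np.allclose(normalize(X, norm='l2'), [[0.6, 0.8]])
    assert np.allclose(normalize(X, norm='max'), [[0.75, 1.]])
    return normalize(X)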
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
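# Illustrative sketch (not part of the original module): binarize() with a
# non-default threshold. Values less than or equal to the threshold become
# 0, values above it become 1. The matrix is an illustrative assumption.
def _demo_binarize():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_bin = binarize(X, threshold=1.0)
    assert np.array_equal(X_bin, [[0., 0., 1.],
                                  [1., 0., 0.],
                                  [0., 0., 0.]])
    return X_bin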
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
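# Illustrative sketch (not part of the original module): for a linear
# kernel K = X X^T, centering K with KernelCenterer is equivalent to
# centering the features first and recomputing the kernel. The random data
# is an assumption used only to check that equivalence numerically.
def _demo_kernel_centerer():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit_transform(K)
    X_centered = X - X.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    return K_centered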
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit,
        # i.e. less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
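# Illustrative sketch (not part of the original module): the effect of
# handle_unknown='ignore' described in _transform above. A category value
# unseen during fit is encoded as all zeros instead of raising an error.
# The tiny dataset is an assumption used only for illustration.
def _demo_one_hot_unknown():
    enc = OneHotEncoder(n_values='auto', handle_unknown='ignore')
    enc.fit([[0], [1]])
    encoded = enc.transform([[2]]).toarray()
    assert np.array_equal(encoded, [[0., 0.]])
    return encoded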
|
bsd-3-clause
|
sudhanshuptl/python-neural-network
|
Iris_Data_Backprop/main.py
|
1
|
2498
|
__author__ = 'Jorgen Grimnes, Sudhanshu Patel'
from sklearn import datasets
import numpy as np
from activation_functions import sigmoid_function, tanh_function, linear_function,\
LReLU_function, ReLU_function, symmetric_elliot_function, elliot_function
from neuralnet import NeuralNet
class Instance:
    # This is a simple encapsulation of an `input signal : output signal`
    # pair in our training set.
def __init__(self, features, target):
self.features = np.array(features)
self.targets = np.array(target)
#endclass Instance
#-------------------------------------------------------------------------------
##Importing Iris data Set
iris = datasets.load_iris()
X = iris.data[:,]
Y = iris.target
inp=[]
for i in range(0, len(X)):  # preprocess Iris data into 4-input / 3-output (one-hot) format
inp.append([list(X[i])])
if Y[i]==0:
y=[1,0,0]
elif Y[i]==1:
y=[0,1,0]
elif Y[i]==2:
y=[0,0,1]
inp[i].append(y)
#training sets
training_one =[]
for i in range(len(inp)):
    training_one.append(Instance(inp[i][0], inp[i][1]))  # Encapsulation of an `input signal : output signal` pair
#------------------------------------------------------------------------------
n_inputs = 4  # Number of input features
n_outputs = 3  # Number of output neurons
n_hiddens = 8  # Number of neurons in each hidden layer
n_hidden_layers = 2  # Number of hidden layers
# Here: 2 hidden layers with 8 nodes each and 1 output layer with 3 nodes
#------------------------ Declaration of activation or transfer function at each layer --------------------------------------#
# specify activation functions per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
activation_functions = [symmetric_elliot_function,]*n_hidden_layers + [ sigmoid_function ]
# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions)
# network is Instance of class Neuralnet
# start training on test set one
network.backpropagation(training_one, ERROR_LIMIT=.05, learning_rate=0.2, momentum_factor=0.2 )
# save the trained network
network.save_to_file( "trained_configuration.pkl" )
# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )
# print out the result
for instance in training_one:
print instance.features, network.forwordProp( np.array([instance.features]) ), "\ttarget:", instance.targets
|
bsd-2-clause
|
Dekken/tick
|
tools/benchmark/hawkes_leastsq_weights.py
|
2
|
2869
|
# License: BSD 3 clause
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tools.benchmark.benchmark_util import (
iter_executables, run_benchmark, extract_build_from_name,
default_result_dir, get_last_result_dir)
BASE_FILE_NAME = os.path.basename(__file__).replace('.py', '')
def run_hawkes_leastsq_weights_benchmark():
result_dir = default_result_dir(base=BASE_FILE_NAME)
for executable in iter_executables('hawkes_leastsq_weights'):
n_threads = [1, 2, 4, 6, 8]
result_dir = run_benchmark(executable, n_threads, result_dir)
return result_dir
def _load_benchmark_data(result_dir=None):
if result_dir is None:
result_dir = get_last_result_dir(BASE_FILE_NAME)
cols = ["time", "iterations", "n_threads", "coeffs", "exectuable", "build"]
df = pd.DataFrame(columns=cols)
for result_file in [f for f in os.listdir(result_dir)
if f.endswith('tsv')]:
result_path = os.path.join(result_dir, result_file)
local_df = pd.read_csv(result_path, sep='\t', names=cols[:-1],
index_col=False)
local_df[cols[-1]] = extract_build_from_name(result_file)
df = df.append(local_df)
    for num_col in [col for col in cols if col not in ['executable', 'build']]:
df[num_col] = pd.to_numeric(df[num_col])
return df, result_dir
def plot_hawkes_leastsq_weights_benchmark(result_dir=None):
df, result_dir = _load_benchmark_data(result_dir)
fig, axes = plt.subplots(1, 2)
ax_time = axes[0]
ax_speedup = axes[1]
max_threads = df['n_threads'].max()
ax_speedup.plot([1, max_threads], [1, max_threads], linestyle='--', lw=1,
color='grey')
for build, df_build in df.groupby('build'):
group_by_threads = df_build.groupby('n_threads')
grouped_times = group_by_threads['time']
mean_times = grouped_times.mean()
confidence_times = grouped_times.std() / np.sqrt(grouped_times.count())
confidence_times *= 1.96
ax_time.plot(mean_times, label=build)
ax_time.fill_between(mean_times.index,
mean_times - confidence_times,
mean_times + confidence_times,
alpha=.3)
ax_time.set_title('Time needed')
speed_ups = mean_times[1] / mean_times
ax_speedup.plot(speed_ups, label=build)
ax_speedup.set_title('Speed up')
for ax in axes:
ax.legend()
ax.set_xlabel('Threads')
fig.suptitle(BASE_FILE_NAME)
plot_file_path = os.path.abspath(os.path.join(result_dir, 'result.png'))
plt.savefig(plot_file_path)
print('saved figure in {}'.format(plot_file_path))
run_hawkes_leastsq_weights_benchmark()
plot_hawkes_leastsq_weights_benchmark()
|
bsd-3-clause
|
lakshayg/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io_test.py
|
89
|
8340
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
|
apache-2.0
|
vybstat/scikit-learn
|
sklearn/datasets/__init__.py
|
176
|
3671
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
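# A minimal usage sketch (illustrative only, not part of this module): most of the loaders
# listed above return Bunch objects exposing ``data`` and ``target`` attributes, e.g.
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     X, y = iris.data, iris.target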
|
bsd-3-clause
|
thisch/pydipole
|
examples/ring.py
|
1
|
2643
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import logging
LG = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
from dipole.field import dipole_e_ff
from dipole.field import dipole_radiant_intensity
from dipole.helper import gen_r
def plot_intens(T=None, P=None, intens=None, title=None, XY=None, ax=None):
if ax is None:
fig, ax = plt.subplots()
if XY is not None:
ax.pcolormesh(XY[0], XY[1], intens)
ax.set_xlabel('x', fontsize=16)
ax.set_ylabel('y', fontsize=16)
else:
ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
intens)
ax.set_xlabel(r'$\Theta_x$', fontsize=16)
ax.set_ylabel(r'$\Theta_y$', fontsize=16)
tm = 10
ax.set_xlim(-tm, tm)
ax.set_ylim(-tm, tm)
# ax.set_xticks([-tm, -45, 0, 45, tm])
# ax.set_yticks([-tm, -45, 0, 45, tm])
ax.set_aspect('equal')
if title:
ax.set_title(title)
return ax
def main(onsphere=False):
thetamax = 10.
k = 1.
Lam = 2*np.pi/k
reval = 1000*Lam
ngrid = 128
ndip = 256
reval = 500.*Lam
LG.info('#### SETTINGS: k=%g, reval=%g', k, reval)
rparams = gen_r(ngrid, reval=reval, onsphere=onsphere, thetamax=thetamax)
dipole_phis, dphi = np.linspace(0, 2*np.pi, ndip,
retstep=True, endpoint=False)
pringfac = 1.
pring = np.zeros((ndip, 3)) # dipole moments
ringslice = slice(None, None, None)
pring[ringslice, 0] = -np.sin(dipole_phis) * pringfac
pring[ringslice, 1] = np.cos(dipole_phis) * pringfac
rringfac = 15.
rfac = rringfac*Lam
    rring = np.zeros((ndip, 3))  # dipole positions
rring[ringslice, 0] = np.cos(dipole_phis) * rfac
rring[ringslice, 1] = np.sin(dipole_phis) * rfac
phases = np.zeros(ndip)
if onsphere:
# radiant intensity
intens = dipole_radiant_intensity(rparams[0],
rparams[1],
pring, rring, phases, k)
else:
eff = dipole_e_ff(rparams[-1], pring, rring, phases, k, t=0)
intens = np.linalg.norm(eff, axis=2)**2
if onsphere:
T, P, _ = rparams
ax = plot_intens(T, P, intens)
else:
X, Y, _, = rparams
ax = plot_intens(intens=intens, XY=(X, Y))
ax.set_title('k=%g, %s' % (k,
('reval=%g' % reval) if onsphere else
('zeval=%g' % reval)))
if __name__ == '__main__':
main(onsphere=True)
main(onsphere=False)
plt.show()
|
mit
|
RayMick/scikit-learn
|
examples/classification/plot_classifier_comparison.py
|
66
|
4895
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
|
bsd-3-clause
|
Sapphirine/bestbuy-recommend
|
src/frequency.py
|
2
|
4065
|
import xml.etree.ElementTree as ET
import csv
import sys
from collections import defaultdict
from Parser import *
import operator
from preprocess import *
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy
from numpy import *
from heapq import *
import re
def getQuerySkuDict(filename):
inputFile = open(filename)
reader = csv.reader(inputFile)
querySkuDict = defaultdict(lambda: defaultdict(int))
queryStat = defaultdict(int)
par = Parser()
for row in reader:
query = row[3]
query = preprocessQuery(query, par)
sku = row[1]
queryStat[query] += 1
querySkuDict[query][sku] += 1
return querySkuDict, queryStat
def getProductInfo():
productTree = ET.parse('../data/small_product_data.xml')
root = productTree.getroot()
productSkus = []
productDescpt = []
for product in root.findall('product'):
sku = product.find('sku').text
description = product.find('longDescription').text
productSkus.append(sku)
productDescpt.append(description)
return productSkus, productDescpt
def getDatumInfo(testFileName):
testFile = open(testFileName, 'r')
reader = csv.reader(testFile)
datum = []
queries = []
for row in reader:
datum.append(row)
queries.append(row[3])
return datum, queries
def getQueryStats():
trainFileName = '../data/psudo_train2.csv'
querySkuDict, queryStat = getQuerySkuDict(trainFileName)
sortedQuery = sorted(queryStat.items(), key=operator.itemgetter(1), reverse=True)
top100 = sortedQuery[0:100]
output = open('../data/result/top100query.txt','w')
for queryDict in top100:
query = queryDict[0]
query = re.sub("\s","-",query)
count = queryDict[1]
query += " "
outputStr = query * int(count*1.0 / 10)
output.write(outputStr)
output.close()
if __name__ == "__main__":
trainFileName = '../data/psudo_train2.csv'
testFileName = '../data/psudo_test2.csv'
querySkuDict, queryStat = getQuerySkuDict(trainFileName)
productSkus, productDescpt = getProductInfo()
datum, queries = getDatumInfo(testFileName)
corpus = []
corpus.extend(productDescpt)
corpus.extend(queries)
vectorizer = TfidfVectorizer(min_df=1)
tfidf = vectorizer.fit_transform(corpus)
tfidfArray = tfidf.toarray()
numProducts = len(productSkus)
numQuery = len(datum)
"""
    For each query, take the top five SKUs ranked by the dot product of tfidf(product) and tfidf(query).
"""
contentOutput = open('../data/result/content.csv','w')
collabOutput = open('../data/result/frequency.csv','w')
writer1 = csv.writer(contentOutput)
writer2 = csv.writer(collabOutput)
total = 0.0
correct = 0.0
correct1 = 0.0
tmp = 0.0
tmp1 = 0.0
for x in range(0, numQuery):
total += 1
trueSku = datum[x][1]
query = queries[x]
#first do content-based filtering
queryArray = tfidfArray[x+numProducts,:]
scoreDict = {}
for j in range(0, numProducts):
prodArray = tfidfArray[j,:]
score = numpy.dot(queryArray, prodArray)
if score > 0:
scoreDict[productSkus[j]] = score
skusItems = nlargest(5, scoreDict.items(), key=operator.itemgetter(1))
skus1 = []
for sku in skusItems:
skus1.append(sku[0])
#then do collaborative filtering
skus2 = []
if query in querySkuDict:
for sku in sorted( querySkuDict[query], key=querySkuDict[query].get, reverse = True ):
skus2.append(sku)
skus2 = skus2[0:5]
writer1.writerow(skus1)
writer2.writerow(skus2)
if trueSku in skus1:
correct1 += 1.0 / (skus1.index(trueSku) + 1)
tmp1 += 1
if trueSku in skus2:
correct += 1.0 / (skus2.index(trueSku) + 1)
tmp += 1
    print "frequency precision " + str(tmp/total)
    print "frequency score: " + str(correct / total)
|
mit
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/2_categories/test11_cross_validate_categories_mov_fixed_1200ms_scaled_method_iii.py
|
1
|
5012
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_III import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
    ###### Sanity Check: count NaN entries (NaN != NaN) ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by the corresponding eigenvectors. On the x axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
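# Illustrative note (not part of the original script): ``perc_total`` above is the cumulative
# explained-variance ratio. Assuming the eigenvalues were sorted in decreasing order
# (``linalg.eig`` does not guarantee an ordering), the number of principal components needed
# to retain, say, 95% of the variance could be read off as
#
#     num_PC_95 = int(np.argmax(perc_total >= 0.95)) + 1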
|
mit
|
gfyoung/pandas
|
pandas/tests/extension/arrow/arrays.py
|
2
|
5349
|
"""
Rudimentary Apache Arrow-backed ExtensionArray.
At the moment, just a boolean array / type is implemented.
Eventually, we'll want to parametrize the type and support
multiple dtypes. Not all methods are implemented yet, and the
current implementation is not efficient.
"""
from __future__ import annotations
import copy
import itertools
import operator
from typing import Type
import numpy as np
import pyarrow as pa
import pandas as pd
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
take,
)
from pandas.api.types import is_scalar
from pandas.core.arraylike import OpsMixin
@register_extension_dtype
class ArrowBoolDtype(ExtensionDtype):
type = np.bool_
kind = "b"
name = "arrow_bool"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> Type[ArrowBoolArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowBoolArray
@property
def _is_boolean(self) -> bool:
return True
@register_extension_dtype
class ArrowStringDtype(ExtensionDtype):
type = str
kind = "U"
name = "arrow_string"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> Type[ArrowStringArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowStringArray
class ArrowExtensionArray(OpsMixin, ExtensionArray):
_data: pa.ChunkedArray
@classmethod
def from_scalars(cls, values):
arr = pa.chunked_array([pa.array(np.asarray(values))])
return cls(arr)
@classmethod
def from_array(cls, arr):
assert isinstance(arr, pa.Array)
return cls(pa.chunked_array([arr]))
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls.from_scalars(scalars)
def __repr__(self):
return f"{type(self).__name__}({repr(self._data)})"
def __getitem__(self, item):
if is_scalar(item):
return self._data.to_pandas()[item]
else:
vals = self._data.to_pandas()[item]
return type(self).from_scalars(vals)
def __len__(self):
return len(self._data)
def astype(self, dtype, copy=True):
# needed to fix this astype for the Series constructor.
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
if copy:
return self.copy()
return self
return super().astype(dtype, copy)
@property
def dtype(self):
return self._dtype
def _logical_method(self, other, op):
if not isinstance(other, type(self)):
raise NotImplementedError()
result = op(np.array(self._data), np.array(other._data))
return ArrowBoolArray(
pa.chunked_array([pa.array(result, mask=pd.isna(self._data.to_pandas()))])
)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self._logical_method(other, operator.eq)
@property
def nbytes(self) -> int:
return sum(
x.size
for chunk in self._data.chunks
for x in chunk.buffers()
if x is not None
)
def isna(self):
nas = pd.isna(self._data.to_pandas())
return type(self).from_scalars(nas)
def take(self, indices, allow_fill=False, fill_value=None):
data = self._data.to_pandas()
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
def copy(self):
return type(self)(copy.copy(self._data))
@classmethod
def _concat_same_type(cls, to_concat):
chunks = list(itertools.chain.from_iterable(x._data.chunks for x in to_concat))
arr = pa.chunked_array(chunks)
return cls(arr)
def __invert__(self):
return type(self).from_scalars(~self._data.to_pandas())
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
arr = self[~self.isna()]
else:
arr = self
try:
op = getattr(arr, name)
except AttributeError as err:
raise TypeError from err
return op(**kwargs)
def any(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().any())
def all(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().all())
class ArrowBoolArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.bool_()
self._data = values
self._dtype = ArrowBoolDtype()
class ArrowStringArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.string()
self._data = values
self._dtype = ArrowStringDtype()
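# A minimal usage sketch (illustrative, not part of the original module), assuming pyarrow is
# importable as above:
#
#     arr = ArrowBoolArray.from_scalars([True, False, True])
#     ser = pd.Series(arr)      # Series backed by the registered "arrow_bool" dtype
#     arr.isna()                # ArrowBoolArray of missingness flags
#     (~arr).take([0, 2])       # inversion and positional take stay within the array type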
|
bsd-3-clause
|
mrkm4ntr/incubator-airflow
|
airflow/providers/google/cloud/hooks/bigquery.py
|
1
|
123031
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import hashlib
import json
import logging
import time
import warnings
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Any, Dict, Iterable, List, Mapping, NoReturn, Optional, Sequence, Tuple, Type, Union
from google.api_core.retry import Retry
from google.cloud.bigquery import (
DEFAULT_RETRY,
Client,
CopyJob,
ExternalConfig,
ExtractJob,
LoadJob,
QueryJob,
SchemaField,
)
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem, DatasetReference
from google.cloud.bigquery.table import EncryptionConfiguration, Row, Table, TableReference
from google.cloud.exceptions import NotFound
from googleapiclient.discovery import Resource, build
from pandas import DataFrame
from pandas_gbq import read_gbq
from pandas_gbq.gbq import (
GbqConnector,
_check_google_client_version as gbq_check_google_client_version,
_test_google_api_imports as gbq_test_google_api_imports,
)
from airflow.exceptions import AirflowException
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
log = logging.getLogger(__name__)
BigQueryJob = Union[CopyJob, QueryJob, LoadJob, ExtractJob]
# pylint: disable=too-many-public-methods
class BigQueryHook(GoogleBaseHook, DbApiHook):
"""Interact with BigQuery. This hook uses the Google Cloud connection."""
conn_name_attr = 'gcp_conn_id' # type: str
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
use_legacy_sql: bool = True,
location: Optional[str] = None,
bigquery_conn_id: Optional[str] = None,
api_resource_configs: Optional[Dict] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = bigquery_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.use_legacy_sql = use_legacy_sql
self.location = location
self.running_job_id = None # type: Optional[str]
        self.api_resource_configs = api_resource_configs if api_resource_configs else {}  # type: Dict
def get_conn(self) -> "BigQueryConnection":
"""Returns a BigQuery PEP 249 connection object."""
service = self.get_service()
return BigQueryConnection(
service=service,
project_id=self.project_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries,
hook=self,
)
def get_service(self) -> Resource:
"""Returns a BigQuery service object."""
warnings.warn(
"This method will be deprecated. Please use `BigQueryHook.get_client` method", DeprecationWarning
)
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized, cache_discovery=False)
def get_client(self, project_id: Optional[str] = None, location: Optional[str] = None) -> Client:
"""
Returns authenticated BigQuery Client.
:param project_id: Project ID for the project which the client acts on behalf of.
:type project_id: str
:param location: Default location for jobs / datasets / tables.
:type location: str
        :return: An authenticated ``google.cloud.bigquery.Client``.
"""
return Client(
client_info=self.client_info,
project=project_id,
location=location,
credentials=self._get_credentials(),
)
@staticmethod
def _resolve_table_reference(
table_resource: Dict[str, Any],
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
) -> Dict[str, Any]:
try:
# Check if tableReference is present and is valid
TableReference.from_api_repr(table_resource["tableReference"])
except KeyError:
# Something is wrong so we try to build the reference
table_resource["tableReference"] = table_resource.get("tableReference", {})
values = [("projectId", project_id), ("tableId", table_id), ("datasetId", dataset_id)]
for key, value in values:
                # Check if the value is already present; if not, use the provided one
resolved_value = table_resource["tableReference"].get(key, value)
if not resolved_value:
# If there's no value in tableReference and provided one is None raise error
raise AirflowException(
f"Table resource is missing proper `tableReference` and `{key}` is None"
)
table_resource["tableReference"][key] = resolved_value
return table_resource
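    # Illustrative sketch (not part of the hook): because this is a static helper, the
    # resolution can be exercised directly. With made-up identifiers,
    #
    #     BigQueryHook._resolve_table_reference(
    #         table_resource={}, project_id="my-project",
    #         dataset_id="my_dataset", table_id="my_table",
    #     )
    #
    # returns ``{"tableReference": {"projectId": "my-project", "tableId": "my_table",
    # "datasetId": "my_dataset"}}``, while a resource that already carries a valid
    # ``tableReference`` is returned unchanged.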
def insert_rows(
self,
table: Any,
rows: Any,
target_fields: Any = None,
commit_every: Any = 1000,
replace: Any = False,
**kwargs,
) -> None:
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(
self,
sql: str,
parameters: Optional[Union[Iterable, Mapping]] = None,
dialect: Optional[str] = None,
**kwargs,
) -> DataFrame:
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
:param kwargs: (optional) passed into pandas_gbq.read_gbq method
:type kwargs: dict
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
credentials, project_id = self._get_credentials_and_project_id()
return read_gbq(
sql, project_id=project_id, dialect=dialect, verbose=False, credentials=credentials, **kwargs
)
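    # Hypothetical usage sketch (the SQL is made up; the connection id is the default one):
    #
    #     hook = BigQueryHook(gcp_conn_id='google_cloud_default', use_legacy_sql=False)
    #     df = hook.get_pandas_df('SELECT 1 AS x', dialect='standard')
    #
    # The query runs through ``pandas_gbq.read_gbq`` with the hook's credentials, and any
    # extra keyword arguments are forwarded to ``read_gbq``.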
@GoogleBaseHook.fallback_to_default_project_id
def table_exists(self, dataset_id: str, table_id: str, project_id: str) -> bool:
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
self.get_client(project_id=project_id).get_table(table_reference)
return True
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def table_partition_exists(
self, dataset_id: str, table_id: str, partition_id: str, project_id: str
) -> bool:
"""
Checks for the existence of a partition in a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
:param partition_id: The name of the partition to check the existence of.
:type partition_id: str
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
return partition_id in self.get_client(project_id=project_id).list_partitions(table_reference)
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_table( # pylint: disable=too-many-arguments
self,
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
table_resource: Optional[Dict[str, Any]] = None,
schema_fields: Optional[List] = None,
time_partitioning: Optional[Dict] = None,
cluster_fields: Optional[List[str]] = None,
labels: Optional[Dict] = None,
view: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
retry: Optional[Retry] = DEFAULT_RETRY,
num_retries: Optional[int] = None,
location: Optional[str] = None,
exists_ok: bool = True,
) -> Table:
"""
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
If provided all other parameters are ignored.
:type table_resource: Dict[str, Any]
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
:param retry: Optional. How to retry the RPC.
:type retry: google.api_core.retry.Retry
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
BigQuery supports clustering for both partitioned and
non-partitioned tables.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param num_retries: Maximum number of retries in case of connection problems.
:type num_retries: int
:param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
:type exists_ok: bool
:return: Created table
"""
if num_retries:
warnings.warn("Parameter `num_retries` is deprecated", DeprecationWarning)
_table_resource: Dict[str, Any] = {}
if self.location:
_table_resource['location'] = self.location
if schema_fields:
_table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
_table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
_table_resource['clustering'] = {'fields': cluster_fields}
if labels:
_table_resource['labels'] = labels
if view:
_table_resource['view'] = view
if encryption_configuration:
_table_resource["encryptionConfiguration"] = encryption_configuration
table_resource = table_resource or _table_resource
table_resource = self._resolve_table_reference(
table_resource=table_resource,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
table = Table.from_api_repr(table_resource)
return self.get_client(project_id=project_id, location=location).create_table(
table=table, exists_ok=exists_ok, retry=retry
)
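    # Hypothetical usage sketch (project, dataset and table names are made up):
    #
    #     hook.create_empty_table(
    #         project_id='my-project',
    #         dataset_id='my_dataset',
    #         table_id='my_table',
    #         schema_fields=[
    #             {"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
    #             {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
    #         ],
    #     )
    #
    # Alternatively a full ``table_resource`` dict can be passed, in which case the other
    # schema-related keyword arguments are ignored.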
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_dataset(
self,
dataset_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
dataset_reference: Optional[Dict[str, Any]] = None,
exists_ok: bool = True,
) -> None:
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
            an empty dataset. Not needed if ``projectId`` is given in ``dataset_reference``.
        :type project_id: str
        :param dataset_id: The id of the dataset. Not needed if ``datasetId`` is given in ``dataset_reference``.
:type dataset_id: str
:param location: (Optional) The geographic location where the dataset should reside.
There is no default value but the dataset will be created in US if nothing is provided.
:type location: str
:param dataset_reference: Dataset reference that could be provided with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
:param exists_ok: If ``True``, ignore "already exists" errors when creating the DATASET.
:type exists_ok: bool
"""
dataset_reference = dataset_reference or {"datasetReference": {}}
for param, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
specified_param = dataset_reference["datasetReference"].get(param)
if specified_param:
if value:
self.log.info(
"`%s` was provided in both `dataset_reference` and as `%s`. "
"Using value from `dataset_reference`",
param,
convert_camel_to_snake(param),
)
continue # use specified value
if not value:
raise ValueError(
f"Please specify `{param}` either in `dataset_reference` "
f"or by providing `{convert_camel_to_snake(param)}`",
)
# dataset_reference has no param but we can fallback to default value
self.log.info(
"%s was not specified in `dataset_reference`. Will use default value %s.", param, value
)
dataset_reference["datasetReference"][param] = value
location = location or self.location
if location:
dataset_reference["location"] = dataset_reference.get("location", location)
dataset: Dataset = Dataset.from_api_repr(dataset_reference)
self.log.info('Creating dataset: %s in project: %s ', dataset.dataset_id, dataset.project)
self.get_client(location=location).create_dataset(dataset=dataset, exists_ok=exists_ok)
self.log.info('Dataset created successfully.')
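    # Hypothetical usage sketch (identifiers are made up): either pass the ids explicitly,
    #
    #     hook.create_empty_dataset(project_id='my-project', dataset_id='my_dataset',
    #                               location='EU')
    #
    # or supply them inside ``dataset_reference``; when both are given, the value from
    # ``dataset_reference`` wins, as logged above.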
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset_tables(
self,
dataset_id: str,
project_id: Optional[str] = None,
max_results: Optional[int] = None,
retry: Retry = DEFAULT_RETRY,
) -> List[Dict[str, Any]]:
"""
Get the list of tables for a given dataset.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: the dataset ID of the requested dataset.
:type dataset_id: str
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:type project_id: str
:param max_results: (Optional) the maximum number of tables to return.
:type max_results: int
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
:return: List of tables associated with the dataset.
"""
self.log.info('Start getting tables list from dataset: %s.%s', project_id, dataset_id)
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
retry=retry,
)
# Convert to a list (consumes all values)
return [t.reference.to_api_repr() for t in tables]
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
dataset_id: str,
project_id: Optional[str] = None,
delete_contents: bool = False,
retry: Retry = DEFAULT_RETRY,
) -> None:
"""
Delete a dataset of Big query in your project.
:param project_id: The name of the project where we have the dataset.
:type project_id: str
        :param dataset_id: The dataset to be deleted.
:type dataset_id: str
:param delete_contents: If True, delete all the tables in the dataset.
If False and the dataset contains tables, the request will fail.
:type delete_contents: bool
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
"""
self.log.info('Deleting from project: %s Dataset:%s', project_id, dataset_id)
self.get_client(project_id=project_id).delete_dataset(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
delete_contents=delete_contents,
retry=retry,
not_found_ok=True,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_external_table( # pylint: disable=too-many-locals,too-many-arguments
self,
external_project_dataset_table: str,
schema_fields: List,
source_uris: List,
source_format: str = 'CSV',
autodetect: bool = False,
compression: str = 'NONE',
ignore_unknown_values: bool = False,
max_bad_records: int = 0,
skip_leading_rows: int = 0,
field_delimiter: str = ',',
quote_character: Optional[str] = None,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
src_fmt_configs: Optional[Dict] = None,
labels: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
location: Optional[str] = None,
project_id: Optional[str] = None,
) -> None:
"""
Creates a new external table in the dataset with the data from Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
            table name of the external table to create.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
        :param source_format: File format of the source data.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:type encoding: str
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
        warnings.warn(
            "This method is deprecated. Please use `BigQueryHook.create_empty_table` method, "
            "passing the `table_resource` object. This gives more flexibility than this method.",
DeprecationWarning,
)
location = location or self.location
src_fmt_configs = src_fmt_configs or {}
source_format = source_format.upper()
compression = compression.upper()
external_config_api_repr = {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
}
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
}
src_fmt_to_param_mapping = {'CSV': 'csvOptions', 'GOOGLE_SHEETS': 'googleSheetsOptions'}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows',
'allowQuotedNewlines',
'fieldDelimiter',
'skipLeadingRows',
'quote',
'encoding',
],
'googleSheetsOptions': ['skipLeadingRows'],
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[source_format]]
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
external_config_api_repr[src_fmt_to_param_mapping[source_format]] = src_fmt_configs
# build external config
external_config = ExternalConfig.from_api_repr(external_config_api_repr)
if schema_fields:
external_config.schema = [SchemaField.from_api_repr(f) for f in schema_fields]
if max_bad_records:
external_config.max_bad_records = max_bad_records
# build table definition
table = Table(table_ref=TableReference.from_string(external_project_dataset_table, project_id))
table.external_data_configuration = external_config
if labels:
table.labels = labels
if encryption_configuration:
table.encryption_configuration = EncryptionConfiguration.from_api_repr(encryption_configuration)
self.log.info('Creating external table: %s', external_project_dataset_table)
self.create_empty_table(
table_resource=table.to_api_repr(), project_id=project_id, location=location, exists_ok=True
)
self.log.info('External table created successfully: %s', external_project_dataset_table)
@GoogleBaseHook.fallback_to_default_project_id
def update_table(
self,
table_resource: Dict[str, Any],
fields: Optional[List[str]] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
project_id: Optional[str] = None,
) -> Dict[str, Any]:
"""
Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
The table has to contain ``tableReference`` or ``project_id``, ``dataset_id`` and ``table_id``
have to be provided.
:type table_resource: Dict[str, Any]
:param fields: The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
:type fields: List[str]
"""
fields = fields or list(table_resource.keys())
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
table = Table.from_api_repr(table_resource)
self.log.info('Updating table: %s', table_resource["tableReference"])
table_object = self.get_client().update_table(table=table, fields=fields)
self.log.info('Table %s.%s.%s updated successfully', project_id, dataset_id, table_id)
return table_object.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def patch_table( # pylint: disable=too-many-arguments
self,
dataset_id: str,
table_id: str,
project_id: Optional[str] = None,
description: Optional[str] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[Dict] = None,
friendly_name: Optional[str] = None,
labels: Optional[Dict] = None,
schema: Optional[List] = None,
time_partitioning: Optional[Dict] = None,
view: Optional[Dict] = None,
require_partition_filter: Optional[bool] = None,
encryption_configuration: Optional[Dict] = None,
) -> None:
"""
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
            The supported and unsupported schema modifications are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
        :param require_partition_filter: [Optional] If true, queries over this table require a
            partition filter. If false, queries over the table may be run without one.
:type require_partition_filter: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated, please use ``BigQueryHook.update_table`` method.",
DeprecationWarning,
)
table_resource: Dict[str, Any] = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
if encryption_configuration:
table_resource["encryptionConfiguration"] = encryption_configuration
self.update_table(
table_resource=table_resource,
fields=list(table_resource.keys()),
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
@GoogleBaseHook.fallback_to_default_project_id
def insert_all(
self,
project_id: str,
dataset_id: str,
table_id: str,
rows: List,
ignore_unknown_values: bool = False,
skip_invalid_rows: bool = False,
fail_on_error: bool = False,
) -> None:
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:type project_id: str
:param dataset_id: The name of the dataset where we have the table
:type dataset_id: str
:param table_id: The name of the table
:type table_id: str
:param rows: the rows to insert
:type rows: list
**Example of rows**: ::
rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:type ignore_unknown_values: bool
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:type skip_invalid_rows: bool
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
:type fail_on_error: bool
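**Example** (illustrative sketch only; the connection id, project, dataset and table names are placeholders): ::
    hook = BigQueryHook(gcp_conn_id='google_cloud_default')
    hook.insert_all(
        project_id='my-project',
        dataset_id='my_dataset',
        table_id='my_table',
        rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}],
        fail_on_error=True,
    )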
"""
self.log.info('Inserting %s row(s) into table %s:%s.%s', len(rows), project_id, dataset_id, table_id)
table = self._resolve_table_reference(
table_resource={}, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
errors = self.get_client().insert_rows(
table=Table.from_api_repr(table),
rows=rows,
ignore_unknown_values=ignore_unknown_values,
skip_invalid_rows=skip_invalid_rows,
)
if errors:
error_msg = f"{len(errors)} insert error(s) occurred. Details: {errors}"
self.log.error(error_msg)
if fail_on_error:
raise AirflowException(f'BigQuery job failed. Error was: {error_msg}')
else:
self.log.info('All row(s) inserted successfully: %s:%s.%s', project_id, dataset_id, table_id)
@GoogleBaseHook.fallback_to_default_project_id
def update_dataset(
self,
fields: Sequence[str],
dataset_resource: Dict[str, Any],
dataset_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> Dataset:
"""
Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_resource: dict
:param dataset_id: The id of the dataset.
:type dataset_id: str
:param fields: The properties of ``dataset`` to change (e.g. "friendly_name").
:type fields: Sequence[str]
:param project_id: The Google Cloud Project ID
:type project_id: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
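**Example** (a sketch of the read-modify-write pattern described above; ``hook`` and all identifiers are placeholders): ::
    dataset = hook.get_dataset(dataset_id='my_dataset', project_id='my-project')
    dataset_resource = dataset.to_api_repr()
    dataset_resource['labels'] = {'env': 'dev'}
    hook.update_dataset(
        fields=['labels'],
        dataset_resource=dataset_resource,
        dataset_id='my_dataset',
        project_id='my-project',
    )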
"""
dataset_resource["datasetReference"] = dataset_resource.get("datasetReference", {})
for key, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
spec_value = dataset_resource["datasetReference"].get(key)
if value and not spec_value:
dataset_resource["datasetReference"][key] = value
self.log.info('Start updating dataset')
dataset = self.get_client(project_id=project_id).update_dataset(
dataset=Dataset.from_api_repr(dataset_resource),
fields=fields,
retry=retry,
)
self.log.info("Dataset successfully updated: %s", dataset)
return dataset
def patch_dataset(
self, dataset_id: str, dataset_resource: Dict, project_id: Optional[str] = None
) -> Dict:
"""
Patches information in an existing dataset.
It only replaces fields that are provided in the submitted dataset resource.
More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/patch
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_resource: dict
:param project_id: The Google Cloud Project ID
:type project_id: str
:rtype: dataset
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
warnings.warn("This method is deprecated. Please use ``update_dataset``.", DeprecationWarning)
project_id = project_id or self.project_id
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError(
"dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id)
)
service = self.get_service()
dataset_project_id = project_id or self.project_id
self.log.info('Start patching dataset: %s:%s', dataset_project_id, dataset_id)
dataset = (
service.datasets() # pylint: disable=no-member
.patch(
datasetId=dataset_id,
projectId=dataset_project_id,
body=dataset_resource,
)
.execute(num_retries=self.num_retries)
)
self.log.info("Dataset successfully patched: %s", dataset)
return dataset
def get_dataset_tables_list(
self,
dataset_id: str,
project_id: Optional[str] = None,
table_prefix: Optional[str] = None,
max_results: Optional[int] = None,
) -> List[Dict[str, Any]]:
"""
Return the list of tables in a BigQuery dataset. If a table prefix is specified,
only tables whose names begin with that prefix are returned.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The Google Cloud Project ID
:type project_id: str
:param table_prefix: Tables must begin with this prefix to be returned (case sensitive)
:type table_prefix: str
:param max_results: The maximum number of results to return in a single response page.
Leverage the page tokens to iterate through the entire collection.
:type max_results: int
:return: List of tables associated with the dataset
"""
warnings.warn("This method is deprecated. Please use ``get_dataset_tables``.", DeprecationWarning)
project_id = project_id or self.project_id
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
)
if table_prefix:
result = [t.reference.to_api_repr() for t in tables if t.table_id.startswith(table_prefix)]
else:
result = [t.reference.to_api_repr() for t in tables]
self.log.info("%s tables found", len(result))
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_datasets_list(
self,
project_id: Optional[str] = None,
include_all: bool = False,
filter_: Optional[str] = None,
max_results: Optional[int] = None,
page_token: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> List[DatasetListItem]:
"""
Return the full list of BigQuery datasets in the given project.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you try to get all datasets
:type project_id: str
:param include_all: True if results include hidden datasets. Defaults to False.
:param filter_: An expression for filtering the results by label. For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
:type filter_: str
:param max_results: Maximum number of datasets to return.
:type max_results: int
:param page_token: Token representing a cursor into the datasets. If not passed,
the API will return the first page of datasets. The token marks the beginning of the
iterator to be returned and the value of the ``page_token`` can be accessed at
``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`.
:type page_token: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
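**Example** (illustrative only; the project id and label filter are placeholders): ::
    datasets = hook.get_datasets_list(
        project_id='my-project',
        filter_='labels.env:dev',
        max_results=10,
    )
    dataset_ids = [d.dataset_id for d in datasets]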
"""
datasets = self.get_client(project_id=project_id).list_datasets(
project=project_id,
include_all=include_all,
filter=filter_,
max_results=max_results,
page_token=page_token,
retry=retry,
)
datasets_list = list(datasets)
self.log.info("Datasets List: %s", len(datasets_list))
return datasets_list
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(self, dataset_id: str, project_id: Optional[str] = None) -> Dataset:
"""
Fetch the dataset referenced by dataset_id.
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The Google Cloud Project ID
:type project_id: str
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
dataset = self.get_client(project_id=project_id).get_dataset(
dataset_ref=DatasetReference(project_id, dataset_id)
)
self.log.info("Dataset Resource: %s", dataset)
return dataset
@GoogleBaseHook.fallback_to_default_project_id
def run_grant_dataset_view_access(
self,
source_dataset: str,
view_dataset: str,
view_table: str,
source_project: Optional[str] = None,
view_project: Optional[str] = None,
project_id: Optional[str] = None,
) -> Dict[str, Any]:
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param project_id: the project of the source dataset. If None,
self.project_id will be used.
:type project_id: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
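**Example** (illustrative only; dataset, view and project names are placeholders): ::
    hook.run_grant_dataset_view_access(
        source_dataset='source_dataset',
        view_dataset='reporting_dataset',
        view_table='my_view',
        project_id='my-project',
    )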
"""
if source_project:
project_id = source_project
warnings.warn(
"Parameter ``source_project`` is deprecated. Use ``project_id``.",
DeprecationWarning,
)
view_project = view_project or project_id
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': view_project, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = self.get_dataset(project_id=project_id, dataset_id=source_dataset)
# Check to see if the view we want to add already exists.
if view_access not in dataset.access_entries:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
dataset.access_entries += [view_access]
dataset = self.update_dataset(
fields=["access"], dataset_resource=dataset.to_api_repr(), project_id=project_id
)
else:
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
return dataset.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def run_table_upsert(
self, dataset_id: str, table_resource: Dict[str, Any], project_id: Optional[str] = None
) -> Dict[str, Any]:
"""
If the table already exists, update it; otherwise create a new table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return: the table resource as returned by the BigQuery API.
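**Example** (illustrative sketch; the table resource below is a minimal placeholder): ::
    table_resource = {
        "tableReference": {"tableId": "my_table"},
        "friendlyName": "My table",
    }
    hook.run_table_upsert(
        dataset_id='my_dataset',
        table_resource=table_resource,
        project_id='my-project',
    )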
"""
table_id = table_resource['tableReference']['tableId']
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
tables_list_resp = self.get_dataset_tables(dataset_id=dataset_id, project_id=project_id)
if any(table['tableId'] == table_id for table in tables_list_resp):
self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id)
table = self.update_table(table_resource=table_resource)
else:
self.log.info('Table %s:%s.%s does not exist, creating.', project_id, dataset_id, table_id)
table = self.create_empty_table(
table_resource=table_resource, project_id=project_id
).to_api_repr()
return table
def run_table_delete(self, deletion_dataset_table: str, ignore_if_missing: bool = False) -> None:
"""
Delete an existing table from the dataset.
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
warnings.warn("This method is deprecated. Please use `delete_table`.", DeprecationWarning)
return self.delete_table(table_id=deletion_dataset_table, not_found_ok=ignore_if_missing)
@GoogleBaseHook.fallback_to_default_project_id
def delete_table(
self,
table_id: str,
not_found_ok: bool = True,
project_id: Optional[str] = None,
) -> None:
"""
Delete an existing table from the dataset. If the table does not exist, return an error
unless not_found_ok is set to True.
:param table_id: A dotted ``(<project>.|<project>:)<dataset>.<table>``
that indicates which table will be deleted.
:type table_id: str
:param not_found_ok: if True, then return success even if the
requested table does not exist.
:type not_found_ok: bool
:param project_id: the project used to perform the request
:type project_id: str
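**Example** (illustrative only; the dotted table id is a placeholder): ::
    hook.delete_table(
        table_id='my-project.my_dataset.my_table',
        not_found_ok=True,
    )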
"""
self.get_client(project_id=project_id).delete_table(
table=Table.from_string(table_id),
not_found_ok=not_found_ok,
)
self.log.info('Deleted table %s', table_id)
def get_tabledata(
self,
dataset_id: str,
table_id: str,
max_results: Optional[int] = None,
selected_fields: Optional[str] = None,
page_token: Optional[str] = None,
start_index: Optional[int] = None,
) -> List[Dict]:
"""
Get data from a given dataset.table, optionally restricted to selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: list of rows
"""
warnings.warn("This method is deprecated. Please use `list_rows`.", DeprecationWarning)
rows = self.list_rows(dataset_id, table_id, max_results, selected_fields, page_token, start_index)
return [dict(r) for r in rows]
@GoogleBaseHook.fallback_to_default_project_id
def list_rows(
self,
dataset_id: str,
table_id: str,
max_results: Optional[int] = None,
selected_fields: Optional[Union[List[str], str]] = None,
page_token: Optional[str] = None,
start_index: Optional[int] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> List[Row]:
"""
List the rows of the table.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:param project_id: Project ID for the project which the client acts on behalf of.
:param location: Default location for job.
:return: list of rows
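**Example** (illustrative only; dataset, table and field names are placeholders): ::
    rows = hook.list_rows(
        dataset_id='my_dataset',
        table_id='my_table',
        max_results=10,
        selected_fields='name,salary',
    )
    for row in rows:
        print(dict(row))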
"""
location = location or self.location
if isinstance(selected_fields, str):
selected_fields = selected_fields.split(",")
if selected_fields:
selected_fields = [SchemaField(n, "") for n in selected_fields]
else:
selected_fields = None
table = self._resolve_table_reference(
table_resource={},
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
result = self.get_client(project_id=project_id, location=location).list_rows(
table=Table.from_api_repr(table),
selected_fields=selected_fields,
max_results=max_results,
page_token=page_token,
start_index=start_index,
)
return list(result)
@GoogleBaseHook.fallback_to_default_project_id
def get_schema(self, dataset_id: str, table_id: str, project_id: Optional[str] = None) -> dict:
"""
Get the schema for a given dataset and table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:param project_id: the optional project ID of the requested table.
If not provided, the connector's configured project will be used.
:return: a table schema
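**Example** (illustrative only; dataset and table names are placeholders): ::
    schema = hook.get_schema(dataset_id='my_dataset', table_id='my_table')
    field_names = [field['name'] for field in schema['fields']]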
"""
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
table = self.get_client(project_id=project_id).get_table(table_ref)
return {"fields": [s.to_api_repr for s in table.schema]}
@GoogleBaseHook.fallback_to_default_project_id
def poll_job_complete(
self,
job_id: str,
project_id: Optional[str] = None,
location: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> bool:
"""
Check if the job has completed.
:param job_id: id of the job.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
:rtype: bool
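**Example** (a minimal polling sketch; the job id and project are placeholders, and ``time`` is assumed to be imported): ::
    while not hook.poll_job_complete(job_id='airflow_1612345_abc', project_id='my-project'):
        time.sleep(10)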
"""
location = location or self.location
job = self.get_client(project_id=project_id, location=location).get_job(job_id=job_id)
return job.done(retry=retry)
def cancel_query(self) -> None:
"""Cancel all started queries that have not yet completed"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.cancel_job`.",
DeprecationWarning,
)
if self.running_job_id:
self.cancel_job(job_id=self.running_job_id)
else:
self.log.info('No running BigQuery jobs to cancel.')
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> None:
"""
Cancel a job and wait for the cancellation to complete
:param job_id: id of the job.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
"""
location = location or self.location
if self.poll_job_complete(job_id=job_id):
self.log.info('No running BigQuery jobs to cancel.')
return
self.log.info('Attempting to cancel job : %s, %s', project_id, job_id)
self.get_client(location=location, project_id=project_id).cancel_job(job_id=job_id)
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts += 1
job_complete = self.poll_job_complete(job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s', project_id, job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
job_id,
)
else:
self.log.info('Waiting for canceled job with id %s to finish.', job_id)
time.sleep(5)
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> Union[CopyJob, QueryJob, LoadJob, ExtractJob]:
"""
Retrieves a BigQuery job. For more information see:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
characters. If not provided, a UUID will be generated.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
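**Example** (illustrative only; the job id, project and location are placeholders): ::
    job = hook.get_job(job_id='airflow_1612345_abc', project_id='my-project', location='US')
    if job.state == 'DONE' and job.error_result:
        raise AirflowException(job.error_result)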
"""
client = self.get_client(project_id=project_id, location=location)
job = client.get_job(job_id=job_id, project=project_id, location=location)
return job
@staticmethod
def _custom_job_id(configuration: Dict[str, Any]) -> str:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest()
microseconds_from_epoch = int(
(datetime.now() - datetime.fromtimestamp(0)) / timedelta(microseconds=1)
)
return f"airflow_{microseconds_from_epoch}_{uniqueness_suffix}"
@GoogleBaseHook.fallback_to_default_project_id
def insert_job(
self,
configuration: Dict,
job_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> BigQueryJob:
"""
Executes a BigQuery job. Waits for the job to complete and returns the job object.
See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
:type configuration: Dict[str, Any]
:param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
characters. If not provided, a UUID will be generated.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
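**Example** (a minimal query-job sketch; the SQL and project id are placeholders): ::
    job = hook.insert_job(
        configuration={"query": {"query": "SELECT 1", "useLegacySql": False}},
        project_id='my-project',
    )
    print(job.job_id)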
"""
location = location or self.location
job_id = job_id or self._custom_job_id(configuration)
client = self.get_client(project_id=project_id, location=location)
job_data = {
"configuration": configuration,
"jobReference": {"jobId": job_id, "projectId": project_id, "location": location},
}
# pylint: disable=protected-access
supported_jobs = {
LoadJob._JOB_TYPE: LoadJob,
CopyJob._JOB_TYPE: CopyJob,
ExtractJob._JOB_TYPE: ExtractJob,
QueryJob._JOB_TYPE: QueryJob,
}
# pylint: enable=protected-access
job = None
for job_type, job_object in supported_jobs.items():
if job_type in configuration:
job = job_object
break
if not job:
raise AirflowException(f"Unknown job type. Supported types: {supported_jobs.keys()}")
job = job.from_api_repr(job_data, client)
self.log.info("Inserting job %s", job.job_id)
# Start the job and wait for it to complete and get the result.
job.result()
return job
def run_with_configuration(self, configuration: dict) -> str:
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
warnings.warn("This method is deprecated. Please use `BigQueryHook.insert_job`", DeprecationWarning)
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_load( # pylint: disable=too-many-locals,too-many-arguments,invalid-name
self,
destination_project_dataset_table: str,
source_uris: List,
schema_fields: Optional[List] = None,
source_format: str = 'CSV',
create_disposition: str = 'CREATE_IF_NEEDED',
skip_leading_rows: int = 0,
write_disposition: str = 'WRITE_EMPTY',
field_delimiter: str = ',',
max_bad_records: int = 0,
quote_character: Optional[str] = None,
ignore_unknown_values: bool = False,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
schema_update_options: Optional[Iterable] = None,
src_fmt_configs: Optional[Dict] = None,
time_partitioning: Optional[Dict] = None,
cluster_fields: Optional[List] = None,
autodetect: bool = False,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:type encoding: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: Union[list, tuple, set]
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
# To provide backward compatibility
schema_update_options = list(schema_update_options or [])
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat # noqa # pylint: disable=line-too-long
if schema_fields is None and not autodetect:
raise ValueError('You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV",
"NEWLINE_DELIMITED_JSON",
"AVRO",
"GOOGLE_SHEETS",
"DATASTORE_BACKUP",
"PARQUET",
]
if source_format not in allowed_formats:
raise ValueError(
"{} is not a valid source format. "
"Please use one of the following types: {}".format(source_format, allowed_formats)
)
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{} contains invalid schema update options."
"Please only use one or more of the following options: {}".format(
schema_update_options, allowed_schema_update_options
)
)
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table',
)
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values,
}
}
time_partitioning = _cleanse_time_partitioning(destination_project_dataset_table, time_partitioning)
if time_partitioning:
configuration['load'].update({'timePartitioning': time_partitioning})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
configuration['load']['schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
if encryption_configuration:
configuration["load"]["destinationEncryptionConfiguration"] = encryption_configuration
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows',
'allowQuotedNewlines',
'autodetect',
'fieldDelimiter',
'skipLeadingRows',
'ignoreUnknownValues',
'nullMarker',
'quote',
'encoding',
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': ['useAvroLogicalTypes'],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'ignoreUnknownValues': ignore_unknown_values,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'encoding': encoding,
}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_copy( # pylint: disable=invalid-name
self,
source_project_dataset_tables: Union[List, str],
destination_project_dataset_table: str,
write_disposition: str = 'WRITE_EMPTY',
create_disposition: str = 'CREATE_IF_NEEDED',
labels: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables
)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = _split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table',
)
source_project_dataset_tables_fixup.append(
{'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table}
)
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_project_dataset_table, default_project_id=self.project_id
)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
}
}
if labels:
configuration['labels'] = labels
if encryption_configuration:
configuration["copy"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_extract(
self,
source_project_dataset_table: str,
destination_cloud_storage_uris: str,
compression: str = 'NONE',
export_format: str = 'CSV',
field_delimiter: str = ',',
print_header: bool = True,
labels: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project, source_dataset, source_table = _split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table',
)
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
} # type: Dict[str, Any]
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
# pylint: disable=too-many-locals,too-many-arguments, too-many-branches
def run_query(
self,
sql: str,
destination_dataset_table: Optional[str] = None,
write_disposition: str = 'WRITE_EMPTY',
allow_large_results: bool = False,
flatten_results: Optional[bool] = None,
udf_config: Optional[List] = None,
use_legacy_sql: Optional[bool] = None,
maximum_billing_tier: Optional[int] = None,
maximum_bytes_billed: Optional[float] = None,
create_disposition: str = 'CREATE_IF_NEEDED',
query_params: Optional[List] = None,
labels: Optional[Dict] = None,
schema_update_options: Optional[Iterable] = None,
priority: str = 'INTERACTIVE',
time_partitioning: Optional[Dict] = None,
api_resource_configs: Optional[Dict] = None,
cluster_fields: Optional[List[str]] = None,
location: Optional[str] = None,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. You could use it
if you need to provide some params that are not supported by the
BigQueryHook like args.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: Union[list, tuple, set]
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
schema_update_options = list(schema_update_options or [])
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs', api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']", configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions # noqa # pylint: disable=line-too-long
allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {}".format(schema_update_options, allowed_schema_update_options)
)
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
if destination_dataset_table:
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_dataset_table, default_project_id=self.project_id
)
destination_dataset_table = { # type: ignore
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields} # type: ignore
query_param_list = [
(sql, 'query', None, (str,)),
(priority, 'priority', 'INTERACTIVE', (str,)),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, list),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, list),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
] # type: List[Tuple]
for param, param_name, param_default, param_type in query_param_list:
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning)
param = param_default
if param in [None, {}, ()]:
continue
_api_resource_configs_duplication_check(param_name, param, configuration['query'])
configuration['query'][param_name] = param
# check valid type of provided param,
# it last step because we can get param from 2 sources,
# and first of all need to find it
_validate_value(param_name, configuration['query'][param_name], param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
if param_name != 'destinationTable':
continue
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}"
)
configuration['query'].update(
{
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
}
)
if (
'useLegacySql' in configuration['query']
and configuration['query']['useLegacySql']
and 'queryParameters' in configuration['query']
):
raise ValueError("Query parameters are not allowed when using legacy SQL")
if labels:
_api_resource_configs_duplication_check('labels', labels, configuration)
configuration['labels'] = labels
if encryption_configuration:
configuration["query"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(
self, project_id: str, service: str, reauth: bool = False, verbose: bool = False, dialect="legacy"
) -> None:
super().__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection:
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs) -> None:
self._args = args
self._kwargs = kwargs
def close(self) -> None: # noqa: D403
"""BigQueryConnection does not have anything to close"""
def commit(self) -> None: # noqa: D403
"""BigQueryConnection does not support transactions"""
def cursor(self) -> "BigQueryCursor": # noqa: D403
"""Return a new :py:class:`Cursor` object using the connection"""
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self) -> NoReturn: # noqa: D403
"""BigQueryConnection does not have transactions"""
raise NotImplementedError("BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
api_resource_configs: Optional[Dict] = None,
location: Optional[str] = None,
num_retries: int = 5,
) -> None:
super().__init__()
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
self.api_resource_configs = api_resource_configs if api_resource_configs else {}  # type: Dict
self.running_job_id = None # type: Optional[str]
self.location = location
self.num_retries = num_retries
self.hook = hook
def create_empty_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_table(*args, **kwargs)
def create_empty_dataset(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_dataset(*args, **kwargs)
def get_dataset_tables(self, *args, **kwargs) -> List[Dict[str, Any]]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables(*args, **kwargs)
def delete_dataset(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.delete_dataset(*args, **kwargs)
def create_external_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_external_table(*args, **kwargs)
def patch_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.patch_table(*args, **kwargs)
def insert_all(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.insert_all(*args, **kwargs)
def update_dataset(self, *args, **kwargs) -> Dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`",
DeprecationWarning,
stacklevel=3,
)
return Dataset.to_api_repr(self.hook.update_dataset(*args, **kwargs))
def patch_dataset(self, *args, **kwargs) -> Dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.patch_dataset(*args, **kwargs)
def get_dataset_tables_list(self, *args, **kwargs) -> List[Dict[str, Any]]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables_list(*args, **kwargs)
def get_datasets_list(self, *args, **kwargs) -> list:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_datasets_list(*args, **kwargs)
def get_dataset(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset(*args, **kwargs)
def run_grant_dataset_view_access(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_grant_dataset_view_access`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks"
".bigquery.BigQueryHook.run_grant_dataset_view_access`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_grant_dataset_view_access(*args, **kwargs)
def run_table_upsert(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_upsert(*args, **kwargs)
def run_table_delete(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_delete(*args, **kwargs)
def get_tabledata(self, *args, **kwargs) -> List[dict]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_tabledata(*args, **kwargs)
def get_schema(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_schema(*args, **kwargs)
def poll_job_complete(self, *args, **kwargs) -> bool:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.poll_job_complete(*args, **kwargs)
def cancel_query(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.cancel_query(*args, **kwargs) # type: ignore # noqa
def run_with_configuration(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_with_configuration(*args, **kwargs)
def run_load(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_load(*args, **kwargs)
def run_copy(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_copy(*args, **kwargs)
def run_extract(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_extract(*args, **kwargs)
def run_query(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_query(*args, **kwargs)
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
location: Optional[str] = None,
num_retries: int = 5,
) -> None:
super().__init__(
service=service,
project_id=project_id,
hook=hook,
use_legacy_sql=use_legacy_sql,
location=location,
num_retries=num_retries,
)
self.buffersize = None # type: Optional[int]
self.page_token = None # type: Optional[str]
self.job_id = None # type: Optional[str]
self.buffer = [] # type: list
self.all_pages_loaded = False # type: bool
@property
def description(self) -> None:
"""The schema description method is not currently implemented"""
raise NotImplementedError
def close(self) -> None:
"""By default, do nothing"""
@property
def rowcount(self) -> int:
"""By default, return -1 to indicate that this is not supported"""
return -1
def execute(self, operation: str, parameters: Optional[dict] = None) -> None:
"""
Executes a BigQuery query and stores the resulting job id on the cursor.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation, parameters) if parameters else operation
self.flush_results()
self.job_id = self.hook.run_query(sql)
def executemany(self, operation: str, seq_of_parameters: list) -> None:
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def flush_results(self) -> None:
"""Flush results related cursor attributes"""
self.page_token = None
self.job_id = None
self.all_pages_loaded = False
self.buffer = []
def fetchone(self) -> Union[List, None]:
"""Fetch the next row of a query result set"""
# pylint: disable=not-callable
return self.next()
def next(self) -> Union[List, None]:
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if not self.buffer:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
location=self.location,
pageToken=self.page_token,
)
.execute(num_retries=self.num_retries)
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = [_bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f'])]
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.flush_results()
return None
return self.buffer.pop(0)
def fetchmany(self, size: Optional[int] = None) -> list:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def fetchall(self) -> List[list]:
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def get_arraysize(self) -> int:
"""Specifies the number of rows to fetch at a time with .fetchmany()"""
return self.buffersize or 1
def set_arraysize(self, arraysize: int) -> None:
"""Specifies the number of rows to fetch at a time with .fetchmany()"""
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes: Any) -> None:
"""Does nothing by default"""
def setoutputsize(self, size: Any, column: Any = None) -> None:
"""Does nothing by default"""
def _bind_parameters(operation: str, parameters: dict) -> str:
"""Helper method that binds parameters to a SQL query"""
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {} # type: Dict[str, str]
for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, str):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s: str) -> str:
"""Helper method that escapes parameters to a SQL query"""
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
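# Illustrative sketch, not part of the original module: how _bind_parameters and _escape
# compose. String values are escaped and single-quoted, other values are stringified verbatim.
def _demo_bind_parameters() -> str:
    """Return the operation with the dict values bound: name quoted/escaped, n inlined as 3."""
    return _bind_parameters(
        "SELECT * FROM tbl WHERE name = %(name)s AND n = %(n)s",
        {"name": "it's", "n": 3},
    )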
def _bq_cast(string_field: str, bq_type: str) -> Union[None, int, float, bool, str]:
"""
Helper method that casts a single BigQuery field value to the appropriate data type.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type in ('FLOAT', 'TIMESTAMP'):
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError(f"{string_field} must have value 'true' or 'false'")
return string_field == 'true'
else:
return string_field
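# Illustrative sketch, not part of the original module: the casts performed by _bq_cast
# for a few common BigQuery types. NULL values come through as None and stay None.
def _demo_bq_cast() -> tuple:
    """Return (3, 2.5, True, 'abc', None) for the inputs below."""
    return (
        _bq_cast("3", "INTEGER"),
        _bq_cast("2.5", "FLOAT"),
        _bq_cast("true", "BOOLEAN"),
        _bq_cast("abc", "STRING"),
        _bq_cast(None, "DATE"),
    )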
def _split_tablename(
table_input: str, default_project_id: str, var_name: Optional[str] = None
) -> Tuple[str, str, str]:
if '.' not in table_input:
raise ValueError(f'Expected table name in the format of <dataset>.<table>. Got: {table_input}')
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return f"Format exception for {var_name}: "
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(
'{var}Use either : or . to specify project '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
cmpt = rest.split('.')
if len(cmpt) == 3:
if project_id:
raise ValueError("{var}Use either : or . to specify project".format(var=var_print(var_name)))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
if project_id is None:
if var_name is not None:
log.info(
'Project not included in %s: %s; using project "%s"',
var_name,
table_input,
default_project_id,
)
project_id = default_project_id
return project_id, dataset_id, table_id
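# Illustrative sketch, not part of the original module: the accepted table-name formats.
# A fully qualified <project>.<dataset>.<table> wins over the default project; a bare
# <dataset>.<table> falls back to it. The project ids used here are placeholders.
def _demo_split_tablename() -> tuple:
    """Return (('other-proj', 'ds', 'tbl'), ('default-proj', 'ds', 'tbl'))."""
    return (
        _split_tablename("other-proj.ds.tbl", "default-proj"),
        _split_tablename("ds.tbl", "default-proj"),
    )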
def _cleanse_time_partitioning(
destination_dataset_table: Optional[str], time_partitioning_in: Optional[Dict]
) -> Dict: # if it is a partitioned table ($ is in the table name) add partition load option
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
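# Illustrative sketch, not part of the original module: a '$' partition decorator in the
# destination table name implies day partitioning, and caller-supplied options are merged in.
def _demo_cleanse_time_partitioning() -> tuple:
    """Return ({'type': 'DAY'}, {'type': 'DAY', 'expirationMs': 1000})."""
    return (
        _cleanse_time_partitioning("proj.ds.tbl$20210101", None),
        _cleanse_time_partitioning("proj.ds.tbl$20210101", {"expirationMs": 1000}),
    )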
def _validate_value(key: Any, value: Any, expected_type: Type) -> None:
"""Function to check expected type and raise error if type is not correct"""
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(key, expected_type, type(value)))
def _api_resource_configs_duplication_check(
key: Any, value: Any, config_dict: dict, config_dict_name='api_resource_configs'
) -> None:
if key in config_dict and value != config_dict[key]:
raise ValueError(
"Values of {param_name} param are duplicated. "
"{dict_name} contained {param_name} param "
"in `query` config and {param_name} was also provided "
"with arg to run_query() method. Please remove duplicates.".format(
param_name=key, dict_name=config_dict_name
)
)
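# Illustrative sketch, not part of the original module: how the two validators above are
# typically combined before merging user arguments into an api_resource_configs dict.
def _demo_validate_and_check(configuration: dict, use_legacy_sql: bool) -> None:
    """Raise TypeError on a wrong type, or ValueError on a conflicting duplicate."""
    _validate_value("use_legacy_sql", use_legacy_sql, bool)
    _api_resource_configs_duplication_check(
        "useLegacySql", use_legacy_sql, configuration.get("query", {})
    )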
def _validate_src_fmt_configs(
source_format: str,
src_fmt_configs: dict,
valid_configs: List[str],
backward_compatibility_configs: Optional[Dict] = None,
) -> Dict:
"""
Validates the given src_fmt_configs against a valid configuration for the source format.
Adds the backward compatibility config to the src_fmt_configs.
:param source_format: File format to export.
:type source_format: str
:param src_fmt_configs: Configure optional fields specific to the source format.
:type src_fmt_configs: dict
:param valid_configs: Valid configuration specific to the source format
:type valid_configs: List[str]
:param backward_compatibility_configs: The top-level params for backward-compatibility
:type backward_compatibility_configs: dict
"""
if backward_compatibility_configs is None:
backward_compatibility_configs = {}
for k, v in backward_compatibility_configs.items():
if k not in src_fmt_configs and k in valid_configs:
src_fmt_configs[k] = v
for k, v in src_fmt_configs.items():
if k not in valid_configs:
raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.")
return src_fmt_configs
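# Illustrative sketch, not part of the original module: validating CSV-specific options.
# The valid_configs list here is a trimmed placeholder, not the full set used by the hook.
def _demo_validate_src_fmt_configs() -> dict:
    """Return {'fieldDelimiter': ';', 'skipLeadingRows': 1} for the inputs below."""
    return _validate_src_fmt_configs(
        source_format="CSV",
        src_fmt_configs={"fieldDelimiter": ";"},
        valid_configs=["fieldDelimiter", "skipLeadingRows"],
        backward_compatibility_configs={"skipLeadingRows": 1},
    )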
|
apache-2.0
|
jdfr/pyslic3r
|
slicesViewer.py
|
1
|
11508
|
import sys
import iopaths as io
debugfile = "cosa.log"
scalingFactor = 0.00000001
craw = '#cccccc' #gray
raw_fac = 0.7
craw3d = (raw_fac, raw_fac, raw_fac)
cmap_raw = 'Greys'
cmaps_toolpaths = [
'Reds',
'Blues',
'Greens',
]
cmaps_contours = [
'Oranges',
'Purples',
'spring',
]
cperimeters = [
'#ff0000', #red
'#0000ff', #blue
'#00ff00', #green
'#00ffff', #cyan
'#ffff00', #yellow
'#ff00ff', #magenta
]
cinfillings = [col.replace('ff', '44') for col in cperimeters]
csurfaces = [col.replace('ff', 'bb') for col in cperimeters]
ccontours = [
'#ffaaaa', #red
'#aaaaff', #blue
'#aaffaa', #green
'#aaffff', #cyan
'#ffffaa', #yellow
'#ffaaff', #magenta
]
cperimeters3d = [
(1,0,0), #red
(0,0,1), #blue
(0,1,0), #green
(0,1,1), #cyan
(1,1,0), #yellow
(1,0,1), #magenta
]
infilling_fac = 0.25
surface_fac = 0.25
cinfillings3d = [tuple(c*infilling_fac for c in col) for col in cperimeters3d]
csurfaces3d = [tuple(c*surface_fac for c in col) for col in cperimeters3d]
ccontours3d = [
(1, 0.66, 0.66), #red
(0.66, 0.66, 1), #blue
(0.66, 1, 0.66), #green
(0.66, 1, 1), #cyan
(1, 1, 0.66), #yellow
(1, 0.66, 1), #magenta
]
ncols = len(cperimeters)
ncmaps = len(cmaps_toolpaths)
#index in the list (to be passed to showSlices) of each path type-ntool
def showlistidx(typ, ntool):
if typ==io.PATHTYPE_RAW_CONTOUR:
return 0
else:
if typ==io.PATHTYPE_PROCESSED_CONTOUR:
return 1+ntool
elif typ==io.PATHTYPE_TOOLPATH_PERIMETER:
return 1+ntool+contents.numtools
elif typ==io.PATHTYPE_TOOLPATH_INFILLING:
return 1+ntool+contents.numtools*2
elif typ==io.PATHTYPE_TOOLPATH_SURFACE:
return 1+ntool+contents.numtools*3
def type2str(typ):
if typ==io.PATHTYPE_RAW_CONTOUR:
return 'raw'
elif typ==io.PATHTYPE_PROCESSED_CONTOUR:
return 'contour'
elif typ==io.PATHTYPE_TOOLPATH_PERIMETER:
return 'perimeter'
elif typ==io.PATHTYPE_TOOLPATH_INFILLING:
return 'infilling'
elif typ==io.PATHTYPE_TOOLPATH_SURFACE:
return 'surface'
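# Illustrative sketch, not part of the original script: the flat index layout that
# showlistidx produces for a given number of tools. Slot 0 is shared by all raw contours;
# every other path type gets one slot per tool.
def _demo_showlistidx_layout(numtools):
    """Return {index: description} for the list passed to showSlices (illustrative only)."""
    layout = {0: 'raw contour (all tools)'}
    for block, name in enumerate(('contour', 'perimeter', 'infilling', 'surface')):
        for ntool in range(numtools):
            layout[1 + ntool + block * numtools] = '%s, tool %d' % (name, ntool)
    return layout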
def show2D(contents, windowname, custom_formatting):
nocustom = custom_formatting is None
#generate an ordered list of lists of paths according to showlistidx.
#The lists of paths are sorted by z, with empty places if needed,
#so all lists have the same number of elements, and are Z-ordered
nelems = contents.numtools*(len(io.ALL_TYPES)-1)+1
pathsbytype_list = [None]*nelems
scalings_list = [None]*nelems
usePatches_list = [None]*nelems
linestyles_list = [None]*nelems
patchestyles_list = [None]*nelems
for key in contents.records.keys():
typ, ntool = key
if not typ in io.ALL_TYPES:
raise Exception('Unrecognized path type %d' % typ)
byz = contents.records[key]
byzl = []
byzls = []
for z in contents.zs:
if z in byz:
if byz[z].savemode==io.SAVEMODE_DOUBLE_3D:
raise Exception('path with type=%d, ntool=%d, z=%f is 3D, cannot show it in matplotlib!!!!' % (typ, ntool, z))
byzl .append(byz[z].paths)
byzls.append(byz[z].scaling)
else:
byzl .append([])
byzls.append([])
idx = showlistidx(typ, ntool)
pathsbytype_list[idx] = byzl
scalings_list [idx] = byzls
if nocustom:
if typ==io.PATHTYPE_RAW_CONTOUR:
usePatches_list[idx] = True
linestyles_list[idx] = None
patchestyles_list[idx] = {'facecolor':craw, 'edgecolor':'none', 'lw': 1}
elif typ==io.PATHTYPE_PROCESSED_CONTOUR:
usePatches_list[idx] = True
linestyles_list[idx] = None
patchestyles_list[idx] = {'facecolor':ccontours[ntool%ncols], 'edgecolor':'none', 'lw': 1}
elif typ==io.PATHTYPE_TOOLPATH_PERIMETER:
usePatches_list[idx] = False
linestyles_list[idx] = {'linewidths':2, 'colors': cperimeters[ntool%ncols]}
patchestyles_list[idx] = None
elif typ==io.PATHTYPE_TOOLPATH_INFILLING:
usePatches_list[idx] = False
linestyles_list[idx] = {'linewidths':2, 'colors': cinfillings[ntool%ncols]}
patchestyles_list[idx] = None
elif typ==io.PATHTYPE_TOOLPATH_SURFACE:
usePatches_list[idx] = False
linestyles_list[idx] = {'linewidths':2, 'colors': csurfaces[ntool%ncols]}
patchestyles_list[idx] = None
else:
typs = type2str(typ)
if typ==io.PATHTYPE_RAW_CONTOUR:
usePatches_list[idx] = custom_formatting[typs]['usepatches']
linestyles_list[idx] = custom_formatting[typs]['linestyle']
patchestyles_list[idx] = custom_formatting[typs]['patchstyle']
else:
length = len(custom_formatting[typs])
usePatches_list[idx] = custom_formatting[typs][ntool%length]['usepatches']
linestyles_list[idx] = custom_formatting[typs][ntool%length]['linestyle']
patchestyles_list[idx] = custom_formatting[typs][ntool%length]['patchstyle']
p2.showSlices(pathsbytype_list, modeN=True, title=windowname, BB=[], zs=contents.zs, linestyle=linestyles_list, patchArgs=patchestyles_list, usePatches=usePatches_list, scalingFactor=scalings_list)
def show3D(contents, windowname, custom_formatting):
nocustom = custom_formatting is None
nelems = contents.numtools*(len(io.ALL_TYPES)-1)+1
paths_list = [None]*nelems
mode_list = [None]*nelems
args_list = [None]*nelems
for key in contents.records.keys():
typ, ntool = key
if not typ in io.ALL_TYPES:
raise Exception('Unrecognized path type %d' % typ)
byz = contents.records[key]
byzl = []
for z in contents.zs:
if z in byz:
byzl.append([z, byz[z].paths, byz[z].scaling])
idx = showlistidx(typ, ntool)
paths_list[idx] = byzl
if nocustom:
if typ==io.PATHTYPE_RAW_CONTOUR:
mode_list[idx] = 'contour'
args_list[idx] = {'color': craw3d, 'line_width':2}
#args_list[idx] = {'colormap':cmap_raw, 'line_width':2}
elif typ==io.PATHTYPE_PROCESSED_CONTOUR:
mode_list[idx] = 'line'
args_list[idx] = {'color': ccontours3d[ntool%ncols], 'line_width':2}
#args_list[idx] = {'colormap':cmaps_contours[ntool%ncmaps], 'line_width':2}
elif typ==io.PATHTYPE_TOOLPATH_PERIMETER:
mode_list[idx] = 'line'
args_list[idx] = {'color': cperimeters3d[ntool%ncols], 'line_width':2}
#TODO: decimate the lines (maybe implement a Douglas-Peucker?) before adding the tubes!!!!!
#mode_list[idx] = 'tube'
#args_list[idx] = {'color': cperimeters3d[ntool%ncols], 'line_width':2, 'tube_radius':contents.xradiuses[ntool]}
##args_list[idx] = {'colormap':cmaps_toolpaths[ntool%ncmaps], 'line_width':2}
elif typ==io.PATHTYPE_TOOLPATH_INFILLING:
mode_list[idx] = 'line'
args_list[idx] = {'color': cinfillings3d[ntool%ncols], 'line_width':2}
#TODO: decimate the lines (maybe implement a Douglas-Peucker?) before adding the tubes!!!!!
#mode_list[idx] = 'tube'
#args_list[idx] = {'color': cinfillings3d[ntool%ncols], 'line_width':2, 'tube_radius':contents.xradiuses[ntool]}
##args_list[idx] = {'colormap':cmaps_toolpaths[ntool%ncmaps], 'line_width':2}
elif typ==io.PATHTYPE_TOOLPATH_SURFACE:
mode_list[idx] = 'line'
args_list[idx] = {'color': csurfaces3d[ntool%ncols], 'line_width':2}
#TODO: decimate the lines (maybe implement a Douglas-Peucker?) before adding the tubes!!!!!
#mode_list[idx] = 'tube'
#args_list[idx] = {'color': cinfillings3d[ntool%ncols], 'line_width':2, 'tube_radius':contents.xradiuses[ntool]}
##args_list[idx] = {'colormap':cmaps_toolpaths[ntool%ncmaps], 'line_width':2}
else:
typs = type2str(typ)
if typ==io.PATHTYPE_RAW_CONTOUR:
mode_list[idx] = custom_formatting[typs]['mode']
args_list[idx] = custom_formatting[typs]['args']
else:
length = len(custom_formatting[typs])
mode_list[idx] = custom_formatting[typs][ntool%length]['mode']
args_list[idx] = custom_formatting[typs][ntool%length]['args']
if mode_list[idx]=='tube':
args_list[idx]['tube_radius'] = contents.xradiuses[ntool]
p3.showSlices(paths_list, title=windowname, modes=mode_list, argss=args_list)
def check_args(cond, errmsg):
if cond:
USAGE = ("\n\nUSAGE: %s WINDOWNAME (2d|3d) (pipe | file INPUTFILE) [CUSTOMFORMATTING]\n"
" WINDOWNAME: name of the window\n"
" 2d/3d: will display in 2D (matplotlib) or 3D (mayavi) mode\n"
" pipe: will read data from binary stdin\n"
" file INPUTFILENAME: will read data from input file\n"
" CUSTOMFORMATTING: if present, it is evaluated to a python structure containing formatting info. It is 2d/3d mode dependent, and no check is done, so it is very brittle, please see the code."
) % sys.argv[0]
sys.stderr.write(errmsg+USAGE)
sys.exit()
if __name__ == "__main__":
argidx=1
argc = len(sys.argv)
check_args(argidx>=argc, 'need to read the window name (first argument), but no more arguments!')
windowname = sys.argv[argidx]
argidx+=1
check_args(argidx>=argc, 'need to read the display mode (second argument), but no more arguments!')
use2d = sys.argv[argidx].lower()
check_args(not use2d in ['2d', '3d'], 'second argument must be either "2d" or "3d", but it is %s!!!' % use2d)
use2d = use2d=='2d'
argidx+=1
check_args(argidx>=argc, 'need to read the input mode argument (third argument), but no more arguments!')
usefile = sys.argv[argidx].lower()
check_args(not usefile in ['file', 'pipe'], 'third argument must be either "file" or "pipe", but it is %s!!!' % usefile)
usefile = usefile == 'file'
argidx+=1
if usefile:
check_args(argidx>=argc, 'need to read the file name, but no more arguments!')
filename = sys.argv[argidx]
argidx+=1
else:
filename = None
if argidx<argc:
try:
custom_formatting = eval(sys.argv[argidx], None, None)
except Exception:
check_args(True, 'Could not evaluate the parameter for custom formatting!!!')
argidx+=1
else:
custom_formatting = None
contents = io.FileContents()
contents.readFromFile(filename)
contents.organizeRecords()
if use2d:
import pyclipper.plot2d as p2
show2D(contents, windowname, custom_formatting)
else:
import pyclipper.plot3d as p3
show3D(contents, windowname, custom_formatting)
|
agpl-3.0
|
UNR-AERIAL/scikit-learn
|
sklearn/metrics/setup.py
|
299
|
1024
|
import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
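# Illustrative note, not part of the original setup script: this configuration is normally
# driven by the top-level scikit-learn build, but the extension can also be compiled in
# place from this directory with:
#
#     python setup.py build_ext --inplace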
|
bsd-3-clause
|
Clyde-fare/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
225
|
10791
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
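# Illustrative sketch, not one of the original tests: the score progression produced by
# MockImprovingEstimator, which is what the linspace assertions in the tests below rely on.
def _demo_mock_score_progression(n_max=20, sizes=(2, 11, 20)):
    """Return [(train_score, test_score), ...]; e.g. (1.9, 0.1) for 2 of 20 samples."""
    pairs = []
    for n in sizes:
        est = MockImprovingEstimator(n_max)
        X_train = np.ones((n, 1))
        est.fit(X_train)
        # score() distinguishes training data by object identity via _is_training_data
        pairs.append((est.score(X_train), est.score(np.zeros((n, 1)))))
    return pairs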
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
wijnandhoitinga/nutils
|
examples/adaptivity.py
|
1
|
6208
|
#! /usr/bin/env python3
#
# In this script we solve the Laplace problem on a unit square that has the
# bottom-right quadrant removed (a.k.a. an L-shaped domain) with Dirichlet
# boundary conditions matching the harmonic function
#
# .. math:: \sqrt[3]{x^2 + y^2} \cos\left(\tfrac23 \arctan\frac{y+x}{y-x}\right),
#
# shifted by 0.5 such that the origin coincides with the middle of the unit
# square. This variation of a well known benchmark problem is known to converge
# suboptimally under uniform refinement due to a singular gradient in the
# reentrant corner. This script demonstrates that optimal convergence can be
# restored by using adaptive refinement.
from nutils import mesh, function, solver, util, export, cli, testing
import numpy, treelog
def main(etype:str, btype:str, degree:int, nrefine:int):
'''
Adaptively refined Laplace problem on an L-shaped domain.
.. arguments::
etype [square]
Type of elements (square/triangle/mixed).
btype [h-std]
Type of basis function (h/th-std/spline), with availability depending on
the configured element type.
degree [2]
Polynomial degree
nrefine [5]
Number of refinement steps to perform.
'''
domain, geom = mesh.unitsquare(2, etype)
x, y = geom - .5
exact = (x**2 + y**2)**(1/3) * function.cos(function.arctan2(y+x, y-x) * (2/3))
domain = domain.trim(exact-1e-15, maxrefine=0)
linreg = util.linear_regressor()
with treelog.iter.fraction('level', range(nrefine+1)) as lrange:
for irefine in lrange:
if irefine:
refdom = domain.refined
ns.refbasis = refdom.basis(btype, degree=degree)
indicator = refdom.integral('refbasis_n,k u_,k d:x' @ ns, degree=degree*2).eval(lhs=lhs)
indicator -= refdom.boundary.integral('refbasis_n u_,k n_k d:x' @ ns, degree=degree*2).eval(lhs=lhs)
supp = ns.refbasis.get_support(indicator**2 > numpy.mean(indicator**2))
domain = domain.refined_by(ns.refbasis.transforms[supp])
ns = function.Namespace()
ns.x = geom
ns.basis = domain.basis(btype, degree=degree)
ns.u = 'basis_n ?lhs_n'
ns.du = ns.u - exact
sqr = domain.boundary['trimmed'].integral('u^2 d:x' @ ns, degree=degree*2)
cons = solver.optimize('lhs', sqr, droptol=1e-15)
sqr = domain.boundary.integral('du^2 d:x' @ ns, degree=7)
cons = solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)
res = domain.integral('basis_n,k u_,k d:x' @ ns, degree=degree*2)
lhs = solver.solve_linear('lhs', res, constrain=cons)
ndofs = len(ns.basis)
error = domain.integral('<du^2, du_,k du_,k>_i d:x' @ ns, degree=7).eval(lhs=lhs)**.5
rate, offset = linreg.add(numpy.log(len(ns.basis)), numpy.log(error))
treelog.user('ndofs: {ndofs}, L2 error: {error[0]:.2e} ({rate[0]:.2f}), H1 error: {error[1]:.2e} ({rate[1]:.2f})'.format(ndofs=len(ns.basis), error=error, rate=rate))
bezier = domain.sample('bezier', 9)
x, u, du = bezier.eval(['x_i', 'u', 'du'] @ ns, lhs=lhs)
export.triplot('sol.png', x, u, tri=bezier.tri, hull=bezier.hull)
export.triplot('err.png', x, du, tri=bezier.tri, hull=bezier.hull)
return ndofs, error, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to perform four refinement steps with quadratic basis functions
# starting from a triangle mesh run :sh:`python3 adaptivity.py etype=triangle
# degree=2 nrefine=4`.
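# As an illustrative sketch (not part of the original example), the same study can also be
# driven directly from Python, e.g. for convergence sweeps; the argument values below
# simply mirror the documented defaults.
def _demo_direct_call():
    return main(etype='square', btype='h-std', degree=2, nrefine=5)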
if __name__ == '__main__':
cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(testing.TestCase):
@testing.requires('matplotlib')
def test_square_quadratic(self):
ndofs, error, lhs = main(nrefine=2, btype='h-std', etype='square', degree=2)
with self.subTest('degrees of freedom'):
self.assertEqual(ndofs, 149)
with self.subTest('L2-error'):
self.assertAlmostEqual(error[0], 0.00065, places=5)
with self.subTest('H1-error'):
self.assertAlmostEqual(error[1], 0.03461, places=5)
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNo1j6FrQmEUxT8RBi4KllVfMsl3z/nK4zEmLC6bhsKCw2gSw5IPFsymGbZiWnr+By8Ii7Yhsk3BMtC4
Z9sJ223ncs85vzvmM9+Yhix8hDIjtnkdHqQSdDDDj1Qajr5qPXN/07MZ2vI4V7UOIvmdO/oEZY45xYDn
oR7ikLHAHVpcs2A1TLhChDO+MOeWt5xjYzm6fOQrGxxiZPeoMGaf37hCyU72hB0u6PglPcQcKxRI/KUd
7AYLvMPpsqGkCTPumzWf+qV92kKevjK36ozDP/FSnh1iteWiqWuf+oMaKuyKaC1i52rKPokiF2WLA/20
bya+ZCPbWKRPpvgFaedebw==''')
@testing.requires('matplotlib')
def test_triangle_quadratic(self):
ndofs, error, lhs = main(nrefine=2, btype='h-std', etype='triangle', degree=2)
with self.subTest('degrees of freedom'):
self.assertEqual(ndofs, 98)
with self.subTest('L2-error'):
self.assertAlmostEqual(error[0], 0.00138, places=5)
with self.subTest('H1-error'):
self.assertAlmostEqual(error[1], 0.05324, places=5)
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNprMV1oesqU2VTO1Nbko6myWbhpq+kckwST90avjRgYzptYm+YYMwBBk3GQWavZb1NXs2+mm83um1WY
bQbyXYEiQWbKZjNM7wJVzjBlYICoPW8CMiXH+LXRR9NwoPkg82xN5IB2MZu2mGabSBnnAbGscYEJj3GV
YQAQg/TVGfaA7RI0BsErRjeNeowDgDQPmF9gkmciaJxtArGjzrAKCGWNpYAQAL0kOBE=''')
@testing.requires('matplotlib')
def test_mixed_linear(self):
ndofs, error, lhs = main(nrefine=2, btype='h-std', etype='mixed', degree=1)
with self.subTest('degrees of freedom'):
self.assertEqual(ndofs, 34)
with self.subTest('L2-error'):
self.assertAlmostEqual(error[0], 0.00450, places=5)
with self.subTest('H1-error'):
self.assertAlmostEqual(error[1], 0.11683, places=5)
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNprMT1u6mQyxUTRzMCUAQhazL6b3jNrMYPxp5iA5FtMD+lcMgDxHa4aXzS+6HDV+fKO85cMnC8zMBzS
AQDBThbY''')
|
mit
|
arokem/pyAFQ
|
AFQ/data.py
|
2
|
86674
|
from dipy.align import resample
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
ResampleFeature)
from dipy.io.streamline import load_tractogram, load_trk
from dipy.data.fetcher import _make_fetcher
import dipy.data as dpd
from io import BytesIO
import gzip
import os
import os.path as op
import json
from glob import glob
import shutil
import boto3
import s3fs
import numpy as np
import pandas as pd
import logging
import time
from bids import BIDSLayout
import bids.config as bids_config
try:
bids_config.set_option('extension_initial_dot', True)
except ValueError:
pass
from botocore import UNSIGNED
from botocore.client import Config
from dask import compute, delayed
from dask.diagnostics import ProgressBar
from pathlib import Path
from tqdm.auto import tqdm
import nibabel as nib
# capture templateflow resource warning and log
import warnings
default_warning_format = warnings.formatwarning
try:
warnings.formatwarning = lambda msg, *args, **kwargs: f'{msg}'
logging.captureWarnings(True)
pywarnings_logger = logging.getLogger('py.warnings')
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
pywarnings_logger.addHandler(console_handler)
warnings.filterwarnings(
"default", category=ResourceWarning,
module="templateflow")
from templateflow import api as tflow
finally:
logging.captureWarnings(False)
warnings.formatwarning = default_warning_format
__all__ = ["fetch_callosum_templates", "read_callosum_templates",
"fetch_templates", "read_templates", "fetch_hcp",
"fetch_stanford_hardi_tractography",
"read_stanford_hardi_tractography",
"organize_stanford_data"]
BUNDLE_RECO_2_AFQ = \
{
"AF_L": "ARC_L", "AF_R": "ARC_R",
"UF_L": "UNC_L", "UF_R": "UNC_R",
"IFOF_L": "IFO_L", "IFOF_R": "IFO_R",
"CST_L": "CST_L", "CST_R": "CST_R",
"ILF_L": "ILF_L", "ILF_R": "ILF_R",
"SLF_L": "SLF_L", "SLF_R": "SLF_R"
}
BUNDLE_MAT_2_PYTHON = \
{'Right Corticospinal': 'CST_R', 'Left Corticospinal': 'CST_L',
'RightCorticospinal': 'CST_R', 'LeftCorticospinal': 'CST_L',
'Right Uncinate': 'UNC_R', 'Left Uncinate': 'UNC_L',
'RightUncinate': 'UNC_R', 'LeftUncinate': 'UNC_L',
'Left IFOF': 'IFO_L', 'Right IFOF': 'IFO_R',
'LeftIFOF': 'IFO_L', 'RightIFOF': 'IFO_R',
'Right Arcuate': 'ARC_R', 'Left Arcuate': 'ARC_L',
'RightArcuate': 'ARC_R', 'LeftArcuate': 'ARC_L',
'Right Thalamic Radiation': 'ATR_R', 'Left Thalamic Radiation': 'ATR_L',
'RightThalamicRadiation': 'ATR_R', 'LeftThalamicRadiation': 'ATR_L',
'Right Cingulum Cingulate': 'CGC_R', 'Left Cingulum Cingulate': 'CGC_L',
'RightCingulumCingulate': 'CGC_R', 'LeftCingulumCingulate': 'CGC_L',
'Right Cingulum Hippocampus': 'HCC_R',
'Left Cingulum Hippocampus': 'HCC_L',
'RightCingulumHippocampus': 'HCC_R',
'LeftCingulumHippocampus': 'HCC_L',
'Callosum Forceps Major': 'FP', 'Callosum Forceps Minor': 'FA',
'CallosumForcepsMajor': 'FP', 'CallosumForcepsMinor': 'FA',
'Right ILF': 'ILF_R', 'Left ILF': 'ILF_L',
'RightILF': 'ILF_R', 'LeftILF': 'ILF_L',
'Right SLF': 'SLF_R', 'Left SLF': 'SLF_L',
'RightSLF': 'SLF_R', 'LeftSLF': 'SLF_L'}
afq_home = op.join(op.expanduser('~'), 'AFQ_data')
baseurl = "https://ndownloader.figshare.com/files/"
callosum_fnames = ["Callosum_midsag.nii.gz",
"L_AntFrontal.nii.gz",
"L_Motor.nii.gz",
"L_Occipital.nii.gz",
"L_Orbital.nii.gz",
"L_PostParietal.nii.gz",
"L_SupFrontal.nii.gz",
"L_SupParietal.nii.gz",
"L_Temporal.nii.gz",
"R_AntFrontal.nii.gz",
"R_Motor.nii.gz",
"R_Occipital.nii.gz",
"R_Orbital.nii.gz",
"R_PostParietal.nii.gz",
"R_SupFrontal.nii.gz",
"R_SupParietal.nii.gz",
"R_Temporal.nii.gz"]
callosum_remote_fnames = ["5273794", "5273797", "5273800", "5273803",
"5273806", "5273809", "5273812", "5273815",
"5273821", "5273818", "5273824", "5273827",
"5273830", "5273833", "5273836", "5273839",
"5273842"]
callosum_md5_hashes = ["709fa90baadeacd64f1d62b5049a4125",
"987c6169de807c4e93dc2cbd7a25d506",
"0da114123d0b0097b96fe450a459550b",
"6d845bd10504f67f1dc17f9000076d7e",
"e16c7873ef4b08d26b77ef746dab8237",
"47193fd4df1ea17367817466de798b90",
"7e78bf9671e6945f4b2f5e7c30595a3c",
"8adbb947377ff7b484c88d8c0ffc2125",
"0fd981a4d0847e0642ff96e84fe44e47",
"87c4855efa406d8fb004cffb8259180e",
"c7969bcf5f2343fd9ce9c49b336cf14c",
"bb4372b88991932150205ffb22aa6cb7",
"d198d4e7db18ddc7236cf143ecb8342e",
"d0f6edef64b0c710c92e634496085dda",
"85eaee44665f244db5adae2e259833f6",
"25f24eb22879a05d12bda007c81ea55a",
"2664e0b8c2d9c59f13649a89bfcce399"]
fetch_callosum_templates = _make_fetcher("fetch_callosum_templates",
op.join(afq_home,
'callosum_templates'),
baseurl, callosum_remote_fnames,
callosum_fnames,
md5_list=callosum_md5_hashes,
doc="Download AFQ callosum templates")
def read_callosum_templates(resample_to=False):
"""Load AFQ callosum templates from file
Returns
-------
dict with: keys: names of template ROIs and values: nibabel Nifti1Image
objects from each of the ROI nifti files.
"""
logger = logging.getLogger('AFQ.data')
files, folder = fetch_callosum_templates()
logger.debug('loading callosum templates')
tic = time.perf_counter()
template_dict = {}
for f in files:
img = nib.load(op.join(folder, f))
if resample_to:
if isinstance(resample_to, str):
resample_to = nib.load(resample_to)
img = nib.Nifti1Image(resample(img.get_fdata(),
resample_to,
img.affine,
resample_to.affine).get_fdata(),
resample_to.affine)
template_dict[f.split('.')[0]] = img
toc = time.perf_counter()
logger.debug(f'callosum templates loaded in {toc - tic:0.4f} seconds')
return template_dict
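# Illustrative sketch, not part of the original module: fetching the callosum templates
# (downloading them on first use) and grabbing one ROI by its file stem.
def _demo_callosum_midsag():
    """Return the mid-sagittal callosum template as a nibabel image."""
    templates = read_callosum_templates()
    return templates["Callosum_midsag"]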
def read_resample_roi(roi, resample_to=None, threshold=False):
"""
Read an ROI from a file name or nibabel image and resample it to
conform with another file name or nibabel image.
Parameters
----------
roi : str or nibabel image class instance.
Should contain a binary volume with 1s in the region of interest and
0s elsewhere.
resample_to : str or nibabel image class instance, optional
A template image to resample to. Typically, this should be the
template to which individual-level data are registered. Defaults to
the MNI template.
threshold: bool or float
If set to False (default), resampled result is returned. Otherwise,
the resampled result is thresholded at this value and binarized.
This is not applied if the input ROI is already in the space of the
output.
Returns
-------
nibabel image class instance that contains the binary ROI resampled into
the requested space.
"""
if isinstance(roi, str):
roi = nib.load(roi)
if resample_to is None:
resample_to = read_mni_template()
if isinstance(resample_to, str):
resample_to = nib.load(resample_to)
if np.allclose(resample_to.affine, roi.affine):
return roi
as_array = resample(
roi.get_fdata(),
resample_to,
roi.affine,
resample_to.affine).get_fdata()
if threshold:
as_array = (as_array > threshold).astype(int)
img = nib.Nifti1Image(
as_array,
resample_to.affine)
return img
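# Illustrative sketch, not part of the original module: resampling a hypothetical ROI file
# into the grid of a subject image and binarizing it. Both paths are placeholders.
def _demo_resample_roi(roi_path="roi.nii.gz", subject_path="sub-01_dwi.nii.gz"):
    """Return the ROI resampled to the subject image and thresholded at 0.5."""
    return read_resample_roi(roi_path, resample_to=subject_path, threshold=0.5)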
template_fnames = ["ATR_roi1_L.nii.gz",
"ATR_roi1_R.nii.gz",
"ATR_roi2_L.nii.gz",
"ATR_roi2_R.nii.gz",
"ATR_L_prob_map.nii.gz",
"ATR_R_prob_map.nii.gz",
"CGC_roi1_L.nii.gz",
"CGC_roi1_R.nii.gz",
"CGC_roi2_L.nii.gz",
"CGC_roi2_R.nii.gz",
"CGC_L_prob_map.nii.gz",
"CGC_R_prob_map.nii.gz",
"CST_roi1_L.nii.gz",
"CST_roi1_R.nii.gz",
"CST_roi2_L.nii.gz",
"CST_roi2_R.nii.gz",
"CST_L_prob_map.nii.gz",
"CST_R_prob_map.nii.gz",
"FA_L.nii.gz",
"FA_R.nii.gz",
"FA_prob_map.nii.gz",
"FP_L.nii.gz",
"FP_R.nii.gz",
"FP_prob_map.nii.gz",
"HCC_roi1_L.nii.gz",
"HCC_roi1_R.nii.gz",
"HCC_roi2_L.nii.gz",
"HCC_roi2_R.nii.gz",
"HCC_L_prob_map.nii.gz",
"HCC_R_prob_map.nii.gz",
"IFO_roi1_L.nii.gz",
"IFO_roi1_R.nii.gz",
"IFO_roi2_L.nii.gz",
"IFO_roi2_R.nii.gz",
"IFO_L_prob_map.nii.gz",
"IFO_R_prob_map.nii.gz",
"ILF_roi1_L.nii.gz",
"ILF_roi1_R.nii.gz",
"ILF_roi2_L.nii.gz",
"ILF_roi2_R.nii.gz",
"ILF_L_prob_map.nii.gz",
"ILF_R_prob_map.nii.gz",
"SLF_roi1_L.nii.gz",
"SLF_roi1_R.nii.gz",
"SLF_roi2_L.nii.gz",
"SLF_roi2_R.nii.gz",
"SLFt_roi2_L.nii.gz",
"SLFt_roi2_R.nii.gz",
"SLF_L_prob_map.nii.gz",
"SLF_R_prob_map.nii.gz",
"UNC_roi1_L.nii.gz",
"UNC_roi1_R.nii.gz",
"UNC_roi2_L.nii.gz",
"UNC_roi2_R.nii.gz",
"UNC_L_prob_map.nii.gz",
"UNC_R_prob_map.nii.gz",
"ARC_L_prob_map.nii.gz",
"ARC_R_prob_map.nii.gz"]
template_remote_fnames = ["5273680", "5273683", "5273686", "5273689",
"11458274", "11458277",
"5273695", "5273692", "5273698", "5273701",
"11458268", "11458271",
"5273704", "5273707", "5273710", "5273713",
"11458262", "11458265",
"5273716", "5273719",
"11458220",
"5273722", "5273725",
"11458226",
"5273728", "5273731", "5273734", "5273746",
"11458259", "11458256",
"5273737", "5273740", "5273743", "5273749",
"11458250", "11458253",
"5273752", "5273755", "5273758", "5273761",
"11458244", "11458247",
"5273764", "5273767", "5273770", "5273773",
"5273776", "5273791",
"11458238", "11458241",
"5273779", "5273782", "5273785", "5273788",
"11458223", "11458229",
"11458232", "11458235"]
template_md5_hashes = ["6b7aaed1a2982fd0ea436a223133908b",
"fd60d46d4e3cbd906c86e4c9e4fd6e2a",
"3aba60b169a35c38640de4ec29d362c8",
"12716a5688a1809fbaed1d58d2e68b59",
"c5637f471df861d9bbb45604db34770b",
"850cc4c04d7241747063fe3cd440b2ce",
"8e8973bc7838c8744914d402f52d91ca",
"c5fa4e6e685e695c006823b6784d2407",
"e1fab77f21d5303ed52285f015e24f0b",
"5f89defec3753fd75cd688c7bfb20a36",
"a4f3cd65b06fb25f63d5dab7592f00f2",
"7e73ab02db30a3ad6bd9e82148c2486e",
"f9db3154955a20b67c2dda758800d14c",
"73941510c798c1ed1b03e2bd481cd5c7",
"660cdc031ee0716d60159c7d933119ea",
"660cdc031ee0716d60159c7d933119ea",
"fd012bc89f6bed7bd54530195496bac4",
"3406906a86e633cc102127cf210a1063",
"9040a7953dcbbf131d135c866182d8ef",
"a72e17194824fcd838a594a2eb50c72e",
"627d7bb2e6d55f8243da815a36d9ff1a",
"55adbe9b8279185eedbe342149e1ff90",
"5a7412a3cf0fb185eec53d1989df2f7c",
"1aa36e83ae7b5555bb19d776ede9c18d",
"ba453196ff179b0e31172806e313b52c",
"d85c6574526b296935f34bf4f65cd493",
"9b81646317f59c7db087f27e2f85679e",
"9806e82c250e4604534b96917f87b7e8",
"213d3fb1ccd756d878f9b50b765b1c8f",
"f1e7e6bc51aa0aa279c54fb3805fb5e3",
"0e68a9feaaddcc9b4d667c2f15903368",
"d45020a87ee4bb496edd350631d91f6a",
"75c2c911826ec4b23159f9bd80e3c039",
"55d616ea9e0c646adc1aafa0f5fbe625",
"dee83fa6b03cfa5e0f5c965953aa6778",
"a13eef7059c98568adfefbab660e434e",
"045b7d5c6341997f3f0120c3a4212ad8",
"d174b1359ba982b03840436c93b7bbb4",
"fff9753f394fc4c73fb2ae40b3b4dde0",
"cd278b4dd6ff77481ea9ac16485a5ae2",
"7bdf5111265107091c7a2fca9215de30",
"7d4a43714504e6e930f922c9bc2a13d5",
"af2bcedf47e193686af329b9a8e259da",
"9a1122943579d11ba169d3ad87a75625",
"627903f7a06627bfd4153dc9245fa390",
"1714cd7f989c3435bdd5a2076e6272a0",
"1fa2114049707a4e05b53f9d95730375",
"b6663067d5ea53c70cb8803948f8adf7",
"d3e068997ebc60407bd6e9576e47dede",
"27ecfbd1d2f98213e52d73b7d70fe0e7",
"fa141bb2d951bec486916acda3652d95",
"d391d073e86e28588be9a6d01b2e7a82",
"a3e085562e6b8111c7ebc358f9450c8b",
"d65c67910807504735e034f7ea92d590",
"93cb24a9128db1a6c34a09eaf79fe7f0",
"71a7455cb4062dc39b1677c118c7b5a5",
"19590c712f1776da1fdba64d4eb7f1f6",
"04d5af0feb2c1b5b52a87ccbbf148e4b",
"53c277be990d00f7de04f2ea35e74d73"]
fetch_templates = _make_fetcher("fetch_templates",
op.join(afq_home, 'templates'),
baseurl, template_remote_fnames,
template_fnames, md5_list=template_md5_hashes,
doc="Download AFQ templates")
def read_templates(resample_to=False):
"""Load AFQ templates from file
Returns
-------
dict with: keys: names of template ROIs and values: nibabel Nifti1Image
objects from each of the ROI nifti files.
"""
logger = logging.getLogger('AFQ.data')
files, folder = fetch_templates()
logger.debug('loading AFQ templates')
tic = time.perf_counter()
template_dict = {}
for f in files:
img = nib.load(op.join(folder, f))
if resample_to:
if isinstance(resample_to, str):
resample_to = nib.load(resample_to)
img = nib.Nifti1Image(
resample(
img.get_fdata(),
resample_to,
img.affine,
resample_to.affine).get_fdata(),
resample_to.affine)
template_dict[f.split('.')[0]] = img
toc = time.perf_counter()
logger.debug(f'AFQ templates loaded in {toc - tic:0.4f} seconds')
return template_dict
# +----------------------------------------------------+
# | Begin S3BIDSStudy classes and supporting functions |
# +----------------------------------------------------+
def get_s3_client(anon=True):
"""Return a boto3 s3 client
Global boto clients are not thread safe so we use this function
to return independent session clients for different threads.
Parameters
----------
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
resolver (client_kwargs, environment variables, config files,
EC2 IAM server, in that order). Default: True
Returns
-------
s3_client : boto3.client('s3')
"""
session = boto3.session.Session()
if anon:
s3_client = session.client(
's3',
config=Config(signature_version=UNSIGNED)
)
else:
s3_client = session.client('s3')
return s3_client
def _ls_s3fs(s3_prefix, anon=True):
"""Returns a dict of list of files using s3fs
The files are divided between subject directories/files and
non-subject directories/files.
Parameters
----------
s3_prefix : str
AWS S3 key for the study or site "directory" that contains all
of the subjects
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
resolver (client_kwargs, environment variables, config files,
EC2 IAM server, in that order). Default: True
Returns
-------
subjects : dict
"""
fs = s3fs.S3FileSystem(anon=anon)
site_files = fs.ls(s3_prefix, detail=False)
# Just need BIDSLayout for the `parse_file_entities` method
# so we can pass dev/null as the argument
layout = BIDSLayout(os.devnull, validate=False)
entities = [
layout.parse_file_entities(f) for f in site_files
]
files = {
'subjects': [
f for f, e in zip(site_files, entities)
if e.get('subject') is not None
],
'other': [
f for f, e in zip(site_files, entities)
if e.get('subject') is None
]
}
return files
def _get_matching_s3_keys(bucket, prefix='', suffix='', anon=True):
"""Generate all the matching keys in an S3 bucket.
Parameters
----------
bucket : str
Name of the S3 bucket
prefix : str, optional
Only fetch keys that start with this prefix
suffix : str, optional
Only fetch keys that end with this suffix
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
resolver (client_kwargs, environment variables, config files,
EC2 IAM server, in that order). Default: True
Yields
------
key : str
An S3 key that matches the prefix and suffix
"""
s3 = get_s3_client(anon=anon)
kwargs = {'Bucket': bucket, 'MaxKeys': 1000}
# If the prefix is a single string (not a tuple of strings), we can
# do the filtering directly in the S3 API.
if isinstance(prefix, str) and prefix:
kwargs['Prefix'] = prefix
while True:
# The S3 API response is a large blob of metadata.
# 'Contents' contains information about the listed objects.
resp = s3.list_objects_v2(**kwargs)
try:
contents = resp['Contents']
except KeyError:
return
for obj in contents:
key = obj['Key']
if key.startswith(prefix) and key.endswith(suffix):
yield key
# The S3 API is paginated, returning up to 1000 keys at a time.
# Pass the continuation token into the next response, until we
# reach the final page (when this field is missing).
try:
kwargs['ContinuationToken'] = resp['NextContinuationToken']
except KeyError:
break
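# Illustrative sketch, not part of the original module: materializing the generator above
# into a list of NIfTI keys. Bucket and prefix are placeholders for a public BIDS dataset.
def _demo_list_nifti_keys(bucket="example-open-bucket", prefix="study/sub-01/"):
    """Return all keys under the prefix that end in .nii.gz (anonymous access)."""
    return list(_get_matching_s3_keys(
        bucket=bucket, prefix=prefix, suffix=".nii.gz", anon=True))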
def _download_from_s3(fname, bucket, key, overwrite=False, anon=True):
"""Download object from S3 to local file
Parameters
----------
fname : str
File path to which to download the object
bucket : str
S3 bucket name
key : str
S3 key for the object to download
overwrite : bool
If True, overwrite file if it already exists.
If False, skip download and return. Default: False
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
resolver (client_kwargs, environment variables, config files,
EC2 IAM server, in that order). Default: True
"""
# Create the directory and file if necessary
fs = s3fs.S3FileSystem(anon=anon)
if overwrite or not op.exists(fname):
Path(op.dirname(fname)).mkdir(parents=True, exist_ok=True)
fs.get("/".join([bucket, key]), fname)
class S3BIDSSubject:
"""A single study subject hosted on AWS S3"""
def __init__(self, subject_id, study):
"""Initialize a Subject instance
Parameters
----------
subject_id : str
Subject-ID for this subject
study : AFQ.data.S3BIDSStudy
The S3BIDSStudy for which this subject was a participant
"""
logging.getLogger("botocore").setLevel(logging.WARNING)
if not isinstance(subject_id, str):
raise TypeError('subject_id must be a string.')
if not isinstance(study, S3BIDSStudy):
raise TypeError('study must be an instance of S3BIDSStudy.')
self._subject_id = subject_id
self._study = study
self._get_s3_keys()
self._files = {'raw': {}, 'derivatives': {}}
@property
def subject_id(self):
"""An identifier string for the subject"""
return self._subject_id
@property
def study(self):
"""The study in which this subject participated"""
return self._study
@property
def s3_keys(self):
"""A dict of S3 keys for this subject's data
The S3 keys are divided between "raw" data and derivatives
"""
return self._s3_keys
@property
def files(self):
"""Local files for this subject's dMRI data
Before the call to subject.download(), the 'raw' and 'derivatives' dicts are empty.
Afterward, the files are stored in a dict with keys
for each Amazon S3 key and values corresponding to
the local file.
"""
return self._files
def __repr__(self):
return (f'{type(self).__name__}(subject_id={self.subject_id}, '
f'study_id={self.study.study_id})')
def _get_s3_keys(self):
"""Get all required S3 keys for this subject
Returns
-------
s3_keys : dict
S3 keys organized into "raw" and "derivatives" lists
"""
prefixes = {
'raw': '/'.join([self.study.s3_prefix,
self.subject_id]).lstrip('/'),
'derivatives': {
dt: '/'.join([
*dt.split('/')[1:], # removes bucket name
self.subject_id
]).lstrip('/') for dt in self.study.derivative_types
},
}
s3_keys = {
'raw': list(set(_get_matching_s3_keys(
bucket=self.study.bucket,
prefix=prefixes['raw'],
anon=self.study.anon,
))),
'derivatives': {
dt: list(set(_get_matching_s3_keys(
bucket=self.study.bucket,
prefix=prefixes['derivatives'][dt],
anon=self.study.anon,
))) for dt in self.study.derivative_types
}
}
self._s3_keys = s3_keys
def download(self, directory, include_derivs=False, overwrite=False,
suffix=None, pbar=True, pbar_idx=0):
"""Download files from S3
Parameters
----------
directory : str
Directory to which to download subject files
include_derivs : bool or str
If True, download all derivatives files. If False, do not.
If a string or sequence of strings is passed, this will
only download derivatives that match the string(s) (e.g.
['dmriprep', 'afq']). Default: False
overwrite : bool
If True, overwrite files for each subject. Default: False
suffix : str
Suffix, including extension, of file(s) to download.
Default: None
pbar : bool
If True, include download progress bar. Default: True
pbar_idx : int
Progress bar index for multithreaded progress bars. Default: 0
"""
if not isinstance(directory, str):
raise TypeError('directory must be a string.')
if not (isinstance(include_derivs, bool)
or isinstance(include_derivs, str)
or all(isinstance(s, str) for s in include_derivs)):
raise TypeError('include_derivs must be a boolean, a '
'string, or a sequence of strings.')
if not isinstance(overwrite, bool):
raise TypeError('overwrite must be a boolean.')
if (suffix is not None) and not(isinstance(suffix, str)):
raise TypeError('suffix must be a string.')
if not isinstance(pbar, bool):
raise TypeError('pbar must be a boolean.')
if not isinstance(pbar_idx, int):
raise TypeError('pbar_idx must be an integer.')
def split_key(key):
if self.study.s3_prefix:
return key.split(self.study.s3_prefix)[-1]
else:
return key
# Filter out keys that do not end with suffix
if suffix is not None:
s3_keys_raw = [
s3key for s3key in self.s3_keys['raw']
if s3key.endswith(suffix)
]
s3_keys_deriv = {
dt: [
s3key for s3key in s3keys if s3key.endswith(suffix)
] for dt, s3keys in self.s3_keys['derivatives'].items()
}
else:
s3_keys_raw = self.s3_keys['raw']
s3_keys_deriv = self.s3_keys['derivatives']
files = {
'raw': [
op.abspath(op.join(
directory,
split_key(key).lstrip('/')
)) for key in s3_keys_raw
],
'derivatives': {
dt: [
op.abspath(op.join(
directory,
split_key(s3key).lstrip('/')
)) for s3key in s3keys
] for dt, s3keys in s3_keys_deriv.items()
}
}
raw_zip = list(zip(s3_keys_raw, files['raw']))
# Populate files parameter
self._files["raw"].update({k: f for k, f in raw_zip})
# Generate list of (key, file) tuples
download_pairs = [(k, f) for k, f in raw_zip]
deriv_zips = {
dt: list(zip(
s3keys, files['derivatives'][dt]
)) for dt, s3keys in s3_keys_deriv.items()
}
deriv_pairs = []
for dt in files['derivatives'].keys():
if include_derivs is True:
# In this case, include all derivatives files
deriv_pairs += [(k, f) for k, f in deriv_zips[dt]]
self._files['derivatives'][dt] = {
k: f for k, f in deriv_zips[dt]
}
elif include_derivs is False:
pass
elif (isinstance(include_derivs, str)
# In this case, filter only derivatives S3 keys that
# include the `include_derivs` string as a substring
and include_derivs in dt):
deriv_pairs += [(k, f) for k, f in deriv_zips[dt]]
self._files['derivatives'][dt] = {
k: f for k, f in deriv_zips[dt]
}
elif (all(isinstance(s, str) for s in include_derivs)
and any([deriv in dt for deriv in include_derivs])):
# In this case, filter only derivatives S3 keys that
# include any of the `include_derivs` strings as a
# substring
deriv_pairs += [(k, f) for k, f in deriv_zips[dt]]
self._files['derivatives'][dt] = {
k: f for k, f in deriv_zips[dt]
}
if include_derivs is not False:
download_pairs += deriv_pairs
# Now iterate through the list and download each item
if pbar:
progress = tqdm(desc=f'Download {self.subject_id}',
position=pbar_idx,
total=len(download_pairs) + 1)
for (key, fname) in download_pairs:
_download_from_s3(fname=fname,
bucket=self.study.bucket,
key=key,
overwrite=overwrite,
anon=self.study.anon)
if pbar:
progress.update()
if pbar:
progress.update()
progress.close()
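# Illustration of S3BIDSSubject.download's filtering options. Subjects are
# normally obtained from an S3BIDSStudy (defined below), so `sub` here stands
# in for something like study.subjects[0]; the derivative name "afq" and the
# ".json" suffix are hypothetical. Not executed at import time.
def _example_subject_download(sub, directory="/tmp/example_subject"):
    # Raw data only:
    sub.download(directory, include_derivs=False)
    # Only derivatives whose pipeline name contains "afq", JSON files only:
    sub.download(directory, include_derivs=["afq"], suffix=".json",
                 overwrite=True)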
class HBNSubject(S3BIDSSubject):
"""A subject in the HBN study
See Also
--------
AFQ.data.S3BIDSSubject
"""
def __init__(self, subject_id, study, site=None):
"""Initialize a Subject instance
Parameters
----------
subject_id : str
Subject-ID for this subject
study : AFQ.data.S3BIDSStudy
The S3BIDSStudy for which this subject was a participant
site : str, optional
Site-ID for the site from which this subject's data was collected
"""
if not (site is None or isinstance(site, str)):
raise TypeError('site must be a string or None.')
self._site = site
super().__init__(
subject_id=subject_id,
study=study
)
@property
def site(self):
"""The site at which this subject was a participant"""
return self._site
def __repr__(self):
return (f'{type(self).__name__}(subject_id={self.subject_id}, '
                f'study_id={self.study.study_id}, site={self.site})')
def _get_s3_keys(self):
"""Get all required S3 keys for this subject
Returns
-------
s3_keys : dict
S3 keys organized into "raw" and "derivatives" lists
"""
prefixes = {
'raw': '/'.join([self.study.s3_prefix,
self.subject_id]).lstrip('/'),
'derivatives': '/'.join([
self.study.s3_prefix,
'derivatives',
self.subject_id
]).lstrip('/')
}
s3_keys = {
datatype: list(set(_get_matching_s3_keys(
bucket=self.study.bucket,
prefix=prefix,
anon=self.study.anon,
))) for datatype, prefix in prefixes.items()
}
        def get_deriv_type(s3_key):
            after_sub = s3_key.split('/' + self.subject_id + '/')[-1]
            deriv_type = after_sub.split('/')[0]
            return deriv_type
deriv_keys = {
dt: [
s3key for s3key in s3_keys['derivatives']
if dt == get_deriv_type(s3key)
] for dt in self.study.derivative_types
}
s3_keys['derivatives'] = deriv_keys
self._s3_keys = s3_keys
class S3BIDSStudy:
"""A BIDS-compliant study hosted on AWS S3"""
def __init__(self, study_id, bucket, s3_prefix, subjects=None,
anon=True, use_participants_tsv=False, random_seed=None,
_subject_class=S3BIDSSubject):
"""Initialize an S3BIDSStudy instance
Parameters
----------
study_id : str
An identifier string for the study
bucket : str
The S3 bucket that contains the study data
s3_prefix : str
The S3 prefix common to all of the study objects on S3
subjects : str, sequence(str), int, or None
If int, retrieve S3 keys for the first `subjects` subjects.
If "all", retrieve all subjects. If str or sequence of
strings, retrieve S3 keys for the specified subjects. If sequence
of ints, then for each int n retrieve S3 keys for the nth subject.
If None, retrieve S3 keys for the first subject. Default: None
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
            resolver (client_kwargs, environment variables, config
files, EC2 IAM server, in that order). Default: True
use_participants_tsv : bool
            If True, use the participants tsv files to retrieve subject
identifiers. This is faster but may not catch all subjects.
Sometimes the tsv files are outdated. Default: False
random_seed : int or None
Random seed for selection of subjects if `subjects` is an
integer. Use the same random seed for reproducibility.
Default: None
_subject_class : object
The subject class to be used for this study. This parameter
has a leading underscore because you probably don't want
to change it. If you do change it, you must provide a
class that quacks like AFQ.data.S3BIDSSubject. Default:
S3BIDSSubject
"""
logging.getLogger("botocore").setLevel(logging.WARNING)
if not isinstance(study_id, str):
raise TypeError('`study_id` must be a string.')
if not isinstance(bucket, str):
raise TypeError('`bucket` must be a string.')
if not isinstance(s3_prefix, str):
raise TypeError('`s3_prefix` must be a string.')
if not (subjects is None
or isinstance(subjects, int)
or isinstance(subjects, str)
or all(isinstance(s, str) for s in subjects)
or all(isinstance(s, int) for s in subjects)):
raise TypeError('`subjects` must be an int, string, '
'sequence of strings, or a sequence of ints.')
if not isinstance(anon, bool):
raise TypeError('`anon` must be of type bool.')
if isinstance(subjects, int) and subjects < 1:
raise ValueError('If `subjects` is an int, it must be '
'greater than 0.')
if not isinstance(use_participants_tsv, bool):
raise TypeError('`use_participants_tsv` must be boolean.')
if not (random_seed is None or isinstance(random_seed, int)):
raise TypeError("`random_seed` must be an integer.")
self._study_id = study_id
self._bucket = bucket
self._s3_prefix = s3_prefix
self._use_participants_tsv = use_participants_tsv
self._random_seed = random_seed
self._anon = anon
self._subject_class = _subject_class
self._local_directories = []
# Get a list of all subjects in the study
self._all_subjects = self._list_all_subjects()
self._derivative_types = self._get_derivative_types()
self._non_subject_s3_keys = self._get_non_subject_s3_keys()
# Convert `subjects` into a sequence of subjectID strings
if subjects is None or isinstance(subjects, int) \
or (isinstance(subjects, list)
and isinstance(subjects[0], int)):
# if subjects is an int, get that many random subjects
prng = np.random.RandomState(random_seed)
randomized_subjects = sorted(self._all_subjects.copy())
prng.shuffle(randomized_subjects)
if subjects is None:
subjects = randomized_subjects[0]
elif isinstance(subjects, int):
subjects = randomized_subjects[:subjects]
else:
subjects = [randomized_subjects[i] for i in subjects]
if isinstance(subjects, str):
subjects = [subjects]
elif subjects == 'all':
# if "all," retrieve all subjects
subjects = sorted(self._all_subjects)
elif isinstance(subjects, str):
# if a string, just get that one subject
subjects = [subjects]
# The last case for subjects is what we want. No transformation needed.
if not set(subjects) <= set(self._all_subjects):
raise ValueError(
f'The following subjects could not be found in the study: '
f'{set(subjects) - set(self._all_subjects)}'
)
subs = [
delayed(self._get_subject)(s) for s in set(subjects)
]
print('Retrieving subject S3 keys')
with ProgressBar():
subjects = list(compute(*subs, scheduler='threads'))
self._subjects = subjects
@property
def study_id(self):
"""An identifier string for the study"""
return self._study_id
@property
def bucket(self):
"""The S3 bucket that contains the study data"""
return self._bucket
@property
def s3_prefix(self):
"""The S3 prefix common to all of the study objects on S3"""
return self._s3_prefix
@property
def subjects(self):
"""A list of Subject instances for each requested subject"""
return self._subjects
@property
def anon(self):
"""Is this study using an anonymous S3 connection?"""
return self._anon
@property
def derivative_types(self):
"""A list of derivative pipelines available in this study"""
return self._derivative_types
@property
def non_sub_s3_keys(self):
"""A dict of S3 keys that are not in subject directories"""
return self._non_subject_s3_keys
@property
def local_directories(self):
"""A list of local directories to which this study has been downloaded"""
return self._local_directories
@property
def use_participants_tsv(self):
"""Did we use a participants.tsv file to populate the list of
study subjects."""
return self._use_participants_tsv
@property
def random_seed(self):
"""The random seed used to retrieve study subjects"""
return self._random_seed
def __repr__(self):
return (f'{type(self).__name__}(study_id={self.study_id}, '
f'bucket={self.bucket}, s3_prefix={self.s3_prefix})')
def _get_subject(self, subject_id):
"""Return a Subject instance from a subject-ID"""
return self._subject_class(subject_id=subject_id,
study=self)
def _get_derivative_types(self):
"""Return a list of available derivatives pipelines
Returns
-------
list
list of available derivatives pipelines
"""
s3_prefix = '/'.join([self.bucket, self.s3_prefix]).rstrip("/")
nonsub_keys = _ls_s3fs(s3_prefix=s3_prefix,
anon=self.anon)['other']
derivatives_prefix = '/'.join([s3_prefix, 'derivatives'])
if derivatives_prefix in nonsub_keys:
return _ls_s3fs(
s3_prefix=derivatives_prefix,
anon=self.anon
)['other']
else:
return []
def _get_non_subject_s3_keys(self):
"""Return a list of 'non-subject' files
In this context, a 'non-subject' file is any file
or directory that is not a subject ID folder
Returns
-------
dict
dict with keys 'raw' and 'derivatives' and whose values
are lists of S3 keys for non-subject files
"""
non_sub_s3_keys = {}
s3_prefix = '/'.join([self.bucket, self.s3_prefix]).rstrip("/")
nonsub_keys = _ls_s3fs(s3_prefix=s3_prefix,
anon=self.anon)['other']
nonsub_keys = [k for k in nonsub_keys
if not k.endswith('derivatives')]
nonsub_deriv_keys = []
for dt in self.derivative_types:
nonsub_deriv_keys.append(_ls_s3fs(
s3_prefix=dt,
anon=self.anon
)['other'])
non_sub_s3_keys = {
'raw': nonsub_keys,
'derivatives': nonsub_deriv_keys,
}
return non_sub_s3_keys
def _list_all_subjects(self):
"""Return list of subjects
Returns
-------
list
list of participant_ids
"""
if self._use_participants_tsv:
tsv_key = "/".join([self.s3_prefix,
"participants.tsv"]).lstrip("/")
s3 = get_s3_client(anon=self.anon)
def get_subs_from_tsv_key(s3_key):
response = s3.get_object(
Bucket=self.bucket,
Key=s3_key
)
return set(pd.read_csv(
response.get('Body')
).participant_id.values)
subject_set = get_subs_from_tsv_key(tsv_key)
subjects = list(subject_set)
else:
s3_prefix = '/'.join([self.bucket, self.s3_prefix]).rstrip("/")
sub_keys = _ls_s3fs(s3_prefix=s3_prefix,
anon=self.anon)['subjects']
# Just need BIDSLayout for the `parse_file_entities`
# method so we can pass dev/null as the argument
layout = BIDSLayout(os.devnull, validate=False)
subjects = []
for key in sub_keys:
entities = layout.parse_file_entities(key)
subjects.append('sub-' + entities.get('subject'))
return list(set(subjects))
def _download_non_sub_keys(self, directory,
select=("dataset_description.json",),
filenames=None):
fs = s3fs.S3FileSystem(anon=self.anon)
if filenames is None:
filenames = self.non_sub_s3_keys['raw']
for fn in filenames:
if select == "all" or any([s in fn for s in select]):
Path(directory).mkdir(parents=True, exist_ok=True)
fs.get(fn, op.join(directory, op.basename(fn)))
def _download_derivative_descriptions(self, include_derivs, directory):
for derivative in self.derivative_types:
if include_derivs is True \
or (isinstance(include_derivs, str)
and include_derivs == op.basename(derivative)) \
or (isinstance(include_derivs, list)
and all(isinstance(s, str) for s in include_derivs)
and any([deriv in derivative for
deriv in include_derivs])):
filenames = \
_ls_s3fs(s3_prefix=derivative, anon=self.anon)['other']
deriv_directory = op.join(
directory, *derivative.split('/')[-2:])
self._download_non_sub_keys(
deriv_directory,
select=("dataset_description.json",),
filenames=filenames)
def download(self, directory,
include_modality_agnostic=("dataset_description.json",),
include_derivs=False,
include_derivs_dataset_description=True,
suffix=None,
overwrite=False, pbar=True):
"""Download files for each subject in the study
Parameters
----------
directory : str
Directory to which to download subject files
include_modality_agnostic : bool, "all" or any subset of ["dataset_description.json", "CHANGES", "README", "LICENSE"]
If True or "all", download all keys in self.non_sub_s3_keys
also. If a subset of ["dataset_description.json", "CHANGES",
"README", "LICENSE"], download only those files. This is
useful if the non_sub_s3_keys contain files common to all
subjects that should be inherited. Default: ("dataset_description.json",)
include_derivs : bool or str
If True, download all derivatives files. If False, do not.
If a string or sequence of strings is passed, this will
only download derivatives that match the string(s) (e.g.
["dmriprep", "afq"]). Default: False
include_derivs_dataset_description : bool
Used only if include_derivs is not False. If True,
dataset_description.json downloaded for each derivative.
suffix : str
Suffix, including extension, of file(s) to download.
Default: None
overwrite : bool
If True, overwrite files for each subject. Default: False
pbar : bool
If True, include progress bar. Default: True
See Also
--------
AFQ.data.S3BIDSSubject.download
"""
self._local_directories.append(directory)
self._local_directories = list(set(self._local_directories))
if include_modality_agnostic is True or include_modality_agnostic == "all":
self._download_non_sub_keys(directory, select="all")
elif include_modality_agnostic is not False:
# Subset selection
valid_set = {"dataset_description.json",
"CHANGES",
"README",
"LICENSE"}
if not set(include_modality_agnostic) <= valid_set:
raise ValueError(
"include_modality_agnostic must be either a boolean, 'all', "
"or a subset of {valid_set}".format(valid_set=valid_set)
)
self._download_non_sub_keys(
directory, select=include_modality_agnostic)
# download dataset_description.json for derivatives
if (include_derivs is not False) \
and include_derivs_dataset_description:
self._download_derivative_descriptions(
include_derivs, directory)
results = [delayed(sub.download)(
directory=directory,
include_derivs=include_derivs,
suffix=suffix,
overwrite=overwrite,
pbar=pbar,
pbar_idx=idx,
) for idx, sub in enumerate(self.subjects)]
compute(*results, scheduler='threads')
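# A minimal sketch of S3BIDSStudy usage. The bucket and prefix are hypothetical
# placeholders for a public BIDS dataset on S3; wrapped in a function so the
# constructor (which lists subjects and S3 keys) does not run at import time.
def _example_s3bids_study():
    study = S3BIDSStudy(
        study_id="example-study",
        bucket="my-public-bids-bucket",  # assumed: anonymously readable
        s3_prefix="bids",                # assumed: prefix of the BIDS root
        subjects=2,                      # two randomly selected subjects
        random_seed=42)
    study.download("/tmp/example-study", include_derivs=False)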
class HBNSite(S3BIDSStudy):
"""An HBN study site
See Also
--------
AFQ.data.S3BIDSStudy
"""
def __init__(self, site, study_id='HBN', bucket='fcp-indi',
s3_prefix='data/Projects/HBN/MRI',
subjects=None, use_participants_tsv=False,
random_seed=None):
"""Initialize the HBN site
Parameters
----------
site : ["Site-SI", "Site-RU", "Site-CBIC", "Site-CUNY"]
The HBN site
study_id : str
An identifier string for the site
bucket : str
The S3 bucket that contains the study data
s3_prefix : str
The S3 prefix common to all of the study objects on S3
subjects : str, sequence(str), int, or None
If int, retrieve S3 keys for the first `subjects` subjects.
If "all", retrieve all subjects. If str or sequence of
strings, retrieve S3 keys for the specified subjects. If
None, retrieve S3 keys for the first subject. Default: None
use_participants_tsv : bool
            If True, use the participants tsv files to retrieve subject
identifiers. This is faster but may not catch all subjects.
Sometimes the tsv files are outdated. Default: False
random_seed : int or None
Random seed for selection of subjects if `subjects` is an
integer. Use the same random seed for reproducibility.
Default: None
"""
valid_sites = ["Site-SI", "Site-RU", "Site-CBIC", "Site-CUNY"]
if site not in valid_sites:
raise ValueError(
"site must be one of {}.".format(valid_sites)
)
self._site = site
super().__init__(
study_id=study_id,
bucket=bucket,
s3_prefix='/'.join([s3_prefix, site]),
subjects=subjects,
use_participants_tsv=use_participants_tsv,
random_seed=random_seed,
_subject_class=HBNSubject
)
@property
def site(self):
"""The HBN site"""
return self._site
def _get_subject(self, subject_id):
"""Return a Subject instance from a subject-ID"""
return self._subject_class(subject_id=subject_id,
study=self,
site=self.site)
def _get_derivative_types(self):
"""Return a list of available derivatives pipelines
        The HBN dataset is not BIDS compliant, so to get a list
        of available derivatives, we must peek inside every
        directory in `derivatives/sub-XXXX/`
Returns
-------
list
list of available derivatives pipelines
"""
s3_prefix = '/'.join([self.bucket, self.s3_prefix]).rstrip("/")
nonsub_keys = _ls_s3fs(s3_prefix=s3_prefix,
anon=self.anon)['other']
derivatives_prefix = '/'.join([s3_prefix, 'derivatives'])
if any([derivatives_prefix in key for key in nonsub_keys]):
deriv_subs = _ls_s3fs(
s3_prefix=derivatives_prefix,
anon=self.anon
)['subjects']
deriv_types = []
for sub_key in deriv_subs:
deriv_types += [
s.split(sub_key)[-1].lstrip('/')
for s in _ls_s3fs(
s3_prefix=sub_key,
anon=self.anon
)['subjects']
]
return list(set(deriv_types))
else:
return []
def _get_non_subject_s3_keys(self):
"""Return a list of 'non-subject' files
In this context, a 'non-subject' file is any file
or directory that is not a subject ID folder. This method
is different from AFQ.data.S3BIDSStudy because the HBN
dataset is not BIDS compliant
Returns
-------
dict
dict with keys 'raw' and 'derivatives' and whose values
are lists of S3 keys for non-subject files
See Also
--------
AFQ.data.S3BIDSStudy._get_non_subject_s3_keys
"""
non_sub_s3_keys = {}
s3_prefix = '/'.join([self.bucket, self.s3_prefix]).rstrip("/")
nonsub_keys = _ls_s3fs(s3_prefix=s3_prefix,
anon=self.anon)['other']
nonsub_keys = [k for k in nonsub_keys
if not k.endswith('derivatives')]
nonsub_deriv_keys = _ls_s3fs(
s3_prefix='/'.join([
self.bucket,
self.s3_prefix,
'derivatives'
]),
anon=self.anon
)['other']
non_sub_s3_keys = {
'raw': nonsub_keys,
'derivatives': nonsub_deriv_keys,
}
return non_sub_s3_keys
def download(self, directory, include_modality_agnostic=False,
include_derivs=False, overwrite=False, pbar=True):
"""Download files for each subject in the study
Parameters
----------
directory : str
Directory to which to download subject files
include_modality_agnostic : bool, "all" or any subset of ["dataset_description.json", "CHANGES", "README", "LICENSE"]
If True or "all", download all keys in self.non_sub_s3_keys
also. If a subset of ["dataset_description.json", "CHANGES",
"README", "LICENSE"], download only those files. This is
useful if the non_sub_s3_keys contain files common to all
subjects that should be inherited. Default: False
include_derivs : bool or str
If True, download all derivatives files. If False, do not.
If a string or sequence of strings is passed, this will
only download derivatives that match the string(s) (e.g.
["dmriprep", "afq"]). Default: False
overwrite : bool
If True, overwrite files for each subject. Default: False
pbar : bool
If True, include progress bar. Default: True
See Also
--------
AFQ.data.S3BIDSSubject.download
"""
super().download(
directory=directory,
include_modality_agnostic=include_modality_agnostic,
include_derivs=include_derivs,
overwrite=overwrite,
pbar=pbar
)
to_bids_description(
directory,
**{"BIDSVersion": "1.0.0",
"Name": "HBN Study, " + self.site,
"DatasetType": "raw",
"Subjects": [s.subject_id for s in self.subjects]})
# +--------------------------------------------------+
# | End S3BIDSStudy classes and supporting functions |
# +--------------------------------------------------+
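# A usage sketch for HBNSite, which points at the public HBN data on the
# "fcp-indi" bucket by default; only the site name, subject count, and local
# directory below are choices made for the example. Not executed at import time.
def _example_hbn_site():
    site = HBNSite(site="Site-RU", subjects=1)
    site.download("/tmp/HBN", include_derivs=False)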
def fetch_hcp(subjects,
hcp_bucket='hcp-openaccess',
profile_name="hcp",
path=None,
study='HCP_1200',
aws_access_key_id=None,
aws_secret_access_key=None):
"""
Fetch HCP diffusion data and arrange it in a manner that resembles the
BIDS [1]_ specification.
Parameters
----------
subjects : list
Each item is an integer, identifying one of the HCP subjects
hcp_bucket : string, optional
The name of the HCP S3 bucket. Default: "hcp-openaccess"
profile_name : string, optional
The name of the AWS profile used for access. Default: "hcp"
path : string, optional
Path to save files into. Default: '~/AFQ_data'
study : string, optional
Which HCP study to grab. Default: 'HCP_1200'
aws_access_key_id : string, optional
AWS credentials to HCP AWS S3. Will only be used if `profile_name` is
set to False.
aws_secret_access_key : string, optional
AWS credentials to HCP AWS S3. Will only be used if `profile_name` is
set to False.
Returns
-------
dict with remote and local names of these files,
path to BIDS derivative dataset
Notes
-----
To use this function with its default setting, you need to have a
file '~/.aws/credentials', that includes a section:
[hcp]
AWS_ACCESS_KEY_ID=XXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXXXXX
The keys are credentials that you can get from HCP
(see https://wiki.humanconnectome.org/display/PublicData/How+To+Connect+to+Connectome+Data+via+AWS) # noqa
Local filenames are changed to match our expected conventions.
.. [1] Gorgolewski et al. (2016). The brain imaging data structure,
a format for organizing and describing outputs of neuroimaging
experiments. Scientific Data, 3::160044. DOI: 10.1038/sdata.2016.44.
"""
if profile_name:
boto3.setup_default_session(profile_name=profile_name)
elif aws_access_key_id is not None and aws_secret_access_key is not None:
boto3.setup_default_session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
else:
raise ValueError("Must provide either a `profile_name` or ",
"both `aws_access_key_id` and ",
"`aws_secret_access_key` as input to 'fetch_hcp'")
s3 = boto3.resource('s3')
bucket = s3.Bucket(hcp_bucket)
if path is None:
if not op.exists(afq_home):
os.mkdir(afq_home)
my_path = afq_home
else:
my_path = path
base_dir = op.join(my_path, study, 'derivatives', 'dmriprep')
if not os.path.exists(base_dir):
os.makedirs(base_dir, exist_ok=True)
data_files = {}
for subject in subjects:
# We make a single session folder per subject for this case, because
# AFQ api expects session structure:
sub_dir = op.join(base_dir, f'sub-{subject}')
sess_dir = op.join(sub_dir, "ses-01")
if not os.path.exists(sub_dir):
os.makedirs(os.path.join(sess_dir, 'dwi'), exist_ok=True)
os.makedirs(os.path.join(sess_dir, 'anat'), exist_ok=True)
data_files[op.join(sess_dir, 'dwi', f'sub-{subject}_dwi.bval')] =\
f'{study}/{subject}/T1w/Diffusion/bvals'
data_files[op.join(sess_dir, 'dwi', f'sub-{subject}_dwi.bvec')] =\
f'{study}/{subject}/T1w/Diffusion/bvecs'
data_files[op.join(sess_dir, 'dwi', f'sub-{subject}_dwi.nii.gz')] =\
f'{study}/{subject}/T1w/Diffusion/data.nii.gz'
data_files[op.join(sess_dir, 'anat', f'sub-{subject}_T1w.nii.gz')] =\
f'{study}/{subject}/T1w/T1w_acpc_dc.nii.gz'
data_files[op.join(sess_dir, 'anat',
f'sub-{subject}_aparc+aseg_seg.nii.gz')] =\
f'{study}/{subject}/T1w/aparc+aseg.nii.gz'
for k in data_files.keys():
if not op.exists(k):
bucket.download_file(data_files[k], k)
# Create the BIDS dataset description file text
hcp_acknowledgements = """Data were provided by the Human Connectome Project, WU-Minn Consortium (Principal Investigators: David Van Essen and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and Centers that support the NIH Blueprint for Neuroscience Research; and by the McDonnell Center for Systems Neuroscience at Washington University.""", # noqa
to_bids_description(op.join(my_path, study),
**{"Name": study,
"Acknowledgements": hcp_acknowledgements,
"Subjects": subjects})
# Create the BIDS derivatives description file text
to_bids_description(base_dir,
**{"Name": study,
"Acknowledgements": hcp_acknowledgements,
"PipelineDescription": {'Name': 'dmriprep'}})
return data_files, op.join(my_path, study)
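# A usage sketch for fetch_hcp. It assumes an "hcp" profile in
# ~/.aws/credentials with valid HCP credentials (see the docstring above); the
# subject IDs are illustrative. Wrapped in a function so that no download
# happens at import time.
def _example_fetch_hcp():
    data_files, study_path = fetch_hcp([100206, 100307])
    # data_files maps local BIDS-style filenames to the HCP S3 keys they came from
    print(len(data_files), study_path)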
stanford_hardi_tractography_remote_fnames = ["5325715", "5325718", "25289735"]
stanford_hardi_tractography_hashes = ['6f4bdae702031a48d1cd3811e7a42ef9',
'f20854b4f710577c58bd01072cfb4de6',
'294bfd1831861e8635eef8834ff18892']
stanford_hardi_tractography_fnames = ['mapping.nii.gz',
'tractography_subsampled.trk',
'full_segmented_cleaned_tractography.trk']
fetch_stanford_hardi_tractography = _make_fetcher(
"fetch_stanford_hardi_tractography",
op.join(afq_home,
'stanford_hardi_tractography'),
baseurl,
stanford_hardi_tractography_remote_fnames,
stanford_hardi_tractography_fnames,
md5_list=stanford_hardi_tractography_hashes,
doc="""Download Stanford HARDI tractography and mapping. For testing
purposes""")
def read_stanford_hardi_tractography():
"""
Reads a minimal tractography from the Stanford dataset.
"""
files, folder = fetch_stanford_hardi_tractography()
files_dict = {}
files_dict['mapping.nii.gz'] = nib.load(
op.join(afq_home,
'stanford_hardi_tractography',
'mapping.nii.gz'))
# We need the original data as reference
dwi_img, gtab = dpd.read_stanford_hardi()
files_dict['tractography_subsampled.trk'] = load_trk(
op.join(afq_home,
'stanford_hardi_tractography',
'tractography_subsampled.trk'),
dwi_img,
bbox_valid_check=False,
trk_header_check=False).streamlines
files_dict['full_segmented_cleaned_tractography.trk'] = load_trk(
op.join(
afq_home,
'stanford_hardi_tractography',
'full_segmented_cleaned_tractography.trk'),
dwi_img).streamlines
return files_dict
def to_bids_description(path, fname='dataset_description.json',
BIDSVersion="1.4.0", **kwargs):
"""Dumps a dict into a bids description at the given location"""
kwargs.update({"BIDSVersion": BIDSVersion})
desc_file = op.join(path, fname)
with open(desc_file, 'w') as outfile:
json.dump(kwargs, outfile)
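# A small sketch of how to_bids_description is used throughout this module:
# keyword arguments are dumped into dataset_description.json at the given path.
# The path below is a placeholder; not executed at import time.
def _example_to_bids_description():
    example_path = "/tmp/example-bids"
    os.makedirs(example_path, exist_ok=True)
    to_bids_description(example_path,
                        **{"Name": "Example dataset",
                           "Subjects": ["sub-01"]})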
def organize_cfin_data(path=None):
"""
Create the expected file-system structure for the
CFIN multi b-value diffusion data-set.
"""
dpd.fetch_cfin_multib()
if path is None:
os.makedirs(afq_home, exist_ok=True)
path = afq_home
bids_path = op.join(path, 'cfin_multib',)
derivatives_path = op.join(bids_path, 'derivatives')
dmriprep_folder = op.join(derivatives_path, 'dmriprep')
if not op.exists(derivatives_path):
anat_folder = op.join(dmriprep_folder, 'sub-01', 'ses-01', 'anat')
os.makedirs(anat_folder, exist_ok=True)
dwi_folder = op.join(dmriprep_folder, 'sub-01', 'ses-01', 'dwi')
os.makedirs(dwi_folder, exist_ok=True)
t1_img = dpd.read_cfin_t1()
nib.save(t1_img, op.join(anat_folder, 'sub-01_ses-01_T1w.nii.gz'))
dwi_img, gtab = dpd.read_cfin_dwi()
nib.save(dwi_img, op.join(dwi_folder, 'sub-01_ses-01_dwi.nii.gz'))
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bvec'), gtab.bvecs)
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bval'), gtab.bvals)
to_bids_description(
bids_path,
**{"BIDSVersion": "1.0.0",
"Name": "CFIN",
"Subjects": ["sub-01"]})
to_bids_description(
dmriprep_folder,
**{"Name": "CFIN",
"PipelineDescription": {"Name": "dipy"}})
def organize_stanford_data(path=None, clear_previous_afq=False):
"""
If necessary, downloads the Stanford HARDI dataset into DIPY directory and
creates a BIDS compliant file-system structure in AFQ data directory:
~/AFQ_data/
└── stanford_hardi
├── dataset_description.json
└── derivatives
├── freesurfer
│ ├── dataset_description.json
│ └── sub-01
│ └── ses-01
│ └── anat
│ ├── sub-01_ses-01_T1w.nii.gz
│ └── sub-01_ses-01_seg.nii.gz
└── vistasoft
├── dataset_description.json
└── sub-01
└── ses-01
└── dwi
├── sub-01_ses-01_dwi.bval
├── sub-01_ses-01_dwi.bvec
└── sub-01_ses-01_dwi.nii.gz
If clear_previous_afq is True and there is an afq folder in derivatives,
it will be removed.
"""
logger = logging.getLogger('AFQ.data')
# fetches data for first subject and session
logger.info('fetching Stanford HARDI data')
dpd.fetch_stanford_hardi()
if path is None:
if not op.exists(afq_home):
logger.info(f'creating AFQ home directory: {afq_home}')
os.makedirs(afq_home, exist_ok=True)
path = afq_home
bids_path = op.join(path, 'stanford_hardi',)
derivatives_path = op.join(bids_path, 'derivatives')
dmriprep_folder = op.join(derivatives_path, 'vistasoft')
freesurfer_folder = op.join(derivatives_path, 'freesurfer')
if clear_previous_afq:
afq_folder = op.join(derivatives_path, 'afq')
if op.exists(afq_folder):
shutil.rmtree(afq_folder)
if not op.exists(derivatives_path):
logger.info(f'creating derivatives directory: {derivatives_path}')
# anatomical data
anat_folder = op.join(freesurfer_folder, 'sub-01', 'ses-01', 'anat')
os.makedirs(anat_folder, exist_ok=True)
t1_img = dpd.read_stanford_t1()
nib.save(t1_img, op.join(anat_folder, 'sub-01_ses-01_T1w.nii.gz'))
seg_img = dpd.read_stanford_labels()[-1]
nib.save(seg_img, op.join(anat_folder,
'sub-01_ses-01_seg.nii.gz'))
# diffusion-weighted imaging data
dwi_folder = op.join(dmriprep_folder, 'sub-01', 'ses-01', 'dwi')
os.makedirs(dwi_folder, exist_ok=True)
dwi_img, gtab = dpd.read_stanford_hardi()
nib.save(dwi_img, op.join(dwi_folder, 'sub-01_ses-01_dwi.nii.gz'))
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bvec'), gtab.bvecs)
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bval'), gtab.bvals)
else:
logger.info('Dataset is already in place. If you want to fetch it '
+ 'again please first remove the folder '
+ derivatives_path)
# Dump out the description of the dataset
to_bids_description(bids_path,
**{"Name": "Stanford HARDI", "Subjects": ["sub-01"]})
# And descriptions of the pipelines in the derivatives:
to_bids_description(dmriprep_folder,
**{"Name": "Stanford HARDI",
"PipelineDescription": {"Name": "vistasoft"}})
to_bids_description(freesurfer_folder,
**{"Name": "Stanford HARDI",
"PipelineDescription": {"Name": "freesurfer"}})
fetch_hcp_atlas_16_bundles = _make_fetcher(
"fetch_hcp_atlas_16_bundles",
op.join(afq_home,
'hcp_atlas_16_bundles'),
'https://ndownloader.figshare.com/files/',
["11921522"],
["atlas_16_bundles.zip"],
md5_list=["b071f3e851f21ba1749c02fc6beb3118"],
doc="Download minimal Recobundles atlas",
unzip=True)
fetch_hcp_atlas_80_bundles = _make_fetcher(
"fetch_hcp_atlas_80_bundles",
op.join(afq_home,
'hcp_atlas_80_bundles'),
'https://ndownloader.figshare.com/files/',
["13638644"],
["Atlas_80_Bundles.zip"],
md5_list=["78331d527a10ec000d4f33bac472e099"],
doc="Download 80-bundle Recobundles atlas",
unzip=True)
def read_hcp_atlas(n_bundles=16):
"""
    Parameters
    ----------
    n_bundles : int
16 or 80, which selects among the two different
atlases:
https://figshare.com/articles/Simple_model_bundle_atlas_for_RecoBundles/6483614 #noqa
https://figshare.com/articles/Advanced_Atlas_of_80_Bundles_in_MNI_space/7375883 #noqa
"""
bundle_dict = {}
if n_bundles == 16:
_, folder = fetch_hcp_atlas_16_bundles()
atlas_folder = "Atlas_in_MNI_Space_16_bundles"
elif n_bundles == 80:
_, folder = fetch_hcp_atlas_80_bundles()
atlas_folder = "Atlas_80_Bundles"
whole_brain = load_tractogram(
op.join(
folder,
atlas_folder,
'whole_brain',
'whole_brain_MNI.trk'),
'same', bbox_valid_check=False).streamlines
bundle_dict['whole_brain'] = whole_brain
bundle_files = glob(
op.join(
folder,
atlas_folder,
"bundles", "*.trk"))
for bundle_file in bundle_files:
bundle = op.splitext(op.split(bundle_file)[-1])[0]
bundle_dict[bundle] = {}
bundle_dict[bundle]['sl'] = load_tractogram(
bundle_file,
'same',
bbox_valid_check=False).streamlines
feature = ResampleFeature(nb_points=100)
metric = AveragePointwiseEuclideanMetric(feature)
qb = QuickBundles(np.inf, metric=metric)
cluster = qb.cluster(bundle_dict[bundle]['sl'])
bundle_dict[bundle]['centroid'] = cluster.centroids[0]
# For some reason, this file-name has a 0 in it, instead of an O:
bundle_dict["IFOF_R"] = bundle_dict["IF0F_R"]
# In the 80-bundle case, there are two files, and both have identical
# content, so this is fine:
del bundle_dict["IF0F_R"]
return bundle_dict
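# A sketch of read_hcp_atlas: the returned dict has a 'whole_brain' entry
# (streamlines only) and, for each bundle, its streamlines and a QuickBundles
# centroid. Wrapped in a function so the atlas is not fetched at import time.
def _example_read_hcp_atlas():
    bundles = read_hcp_atlas(n_bundles=16)
    print(len(bundles['whole_brain']))
    for name, entry in bundles.items():
        if name == 'whole_brain':
            continue
        print(name, len(entry['sl']), entry['centroid'].shape)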
fetch_aal_atlas = _make_fetcher(
"fetch_aal_atlas",
op.join(afq_home,
'aal_atlas'),
'https://ndownloader.figshare.com/files/',
["28416852",
"28416855"],
["MNI_AAL_AndMore.nii.gz",
"MNI_AAL.txt"],
md5_list=["69395b75a16f00294a80eb9428bf7855",
"59fd3284b17de2fbe411ca1c7afe8c65"],
doc="Download the AAL atlas",
unzip=False)
def read_aal_atlas(resample_to=None):
"""
Reads the AAL atlas [1]_.
Parameters
----------
    resample_to : nib.Nifti1Image class instance, optional
        If provided, the AAL atlas is resampled and aligned to the grid
        of this image.
.. [1] Tzourio-Mazoyer N, Landeau B, Papathanassiou D, Crivello F, Etard O,
Delcroix N, Mazoyer B, Joliot M. (2002). Automated anatomical
labeling of activations in SPM using a macroscopic anatomical
parcellation of the MNI MRI single-subject brain. Neuroimage. 2002;
15(1):273-89.
"""
file_dict, folder = fetch_aal_atlas()
out_dict = {}
for f in file_dict:
if f.endswith('.txt'):
out_dict['labels'] = pd.read_csv(op.join(folder, f))
else:
out_dict['atlas'] = nib.load(op.join(folder, f))
if resample_to is not None:
data = out_dict['atlas'].get_fdata()
oo = []
for ii in range(data.shape[-1]):
oo.append(resample(
data[..., ii],
resample_to,
out_dict['atlas'].affine,
resample_to.affine).get_fdata())
out_dict['atlas'] = nib.Nifti1Image(np.stack(oo, -1),
resample_to.affine)
return out_dict
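# A sketch of read_aal_atlas, resampling the atlas into the grid of the MNI
# template returned by read_mni_template (defined further below in this
# module). Wrapped in a function so nothing is fetched at import time.
def _example_read_aal_atlas():
    mni = read_mni_template(resolution=2)
    aal = read_aal_atlas(resample_to=mni)
    print(aal['atlas'].shape)      # 4D: AAL labels plus extra CST/cingulate volumes
    print(aal['labels'].head())    # pandas DataFrame of region labels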
def aal_to_regions(regions, atlas=None):
"""
Queries for large regions containing multiple AAL ROIs
Parameters
----------
regions : string or list of strings
The name of the requested region. This can either be an AAL-defined ROI
name (e.g, 'Occipital_Sup_L') or one of:
{'leftfrontal' | 'leftoccipital' | 'lefttemporal' | 'leftparietal'
| 'leftanttemporal' | 'leftparietal' | 'leftanttemporal'
| 'leftuncinatefront' | 'leftifoffront' | 'leftinfparietal'
| 'cerebellum' | 'leftarcfrontal' | 'leftarctemp' | 'leftcingpost'}
each of which there is an equivalent 'right' region for. In addition,
there are a few bilateral regions: {'occipital' | 'temporal'}, which
encompass both the right and left region of this name, as well as:
{'cstinferior' | 'cstsuperior'}
atlas : 4D array
Contains the AAL atlas in the correct coordinate frame with additional
volumes for CST and cingulate ROIs ("AAL and more").
Returns
    -------
3D indices to the requested region in the atlas volume
Notes
-----
Several regions can be referred to by multiple names:
'leftuncinatetemp' = 'leftilftemp'= 'leftanttemporal'
'rightuncinatetemp' = 'rightilftemp' = 'rightanttemporal'
'leftslfpar'] = 'leftinfparietal'
'rightslfpar' = 'rightinfparietal'
'leftslffrontal' = 'leftarcfrontal'
'rightslffrontal' = 'rightarcfrontal'
"""
if atlas is None:
atlas = read_aal_atlas()['atlas']
atlas_vals = {'leftfrontal': np.arange(1, 26, 2),
# Occipital regions do not include fusiform:
'leftoccipital': np.arange(43, 54, 2),
# Temporal regions include fusiform:
'lefttemporal': np.concatenate([np.arange(37, 42, 2),
np.array([55]),
np.arange(79, 90, 2)]),
'leftparietal': np.array([57, 67, 2]),
'leftanttemporal': np.array([41, 83, 87]),
'leftuncinatefront': np.array([5, 9, 15, 25]),
'leftifoffront': np.array([3, 5, 7, 9, 13, 15, 25]),
'leftinfparietal': np.array([61, 63, 65]),
'cerebellum': np.arange(91, 117),
'leftarcfrontal': np.array([1, 11, 13]),
'leftarctemp': np.array([79, 81, 85, 89]),
}
# Right symmetrical is off by one:
atlas_vals['rightfrontal'] = atlas_vals['leftfrontal'] + 1
atlas_vals['rightoccipital'] = atlas_vals['leftoccipital'] + 1
atlas_vals['righttemporal'] = atlas_vals['lefttemporal'] + 1
atlas_vals['rightparietal'] = atlas_vals['leftparietal'] + 1
atlas_vals['rightanttemporal'] = atlas_vals['leftanttemporal'] + 1
atlas_vals['rightuncinatefront'] = atlas_vals['leftuncinatefront'] + 1
atlas_vals['rightifoffront'] = atlas_vals['leftifoffront'] + 1
atlas_vals['rightinfparietal'] = atlas_vals['leftinfparietal'] + 1
atlas_vals['rightarcfrontal'] = atlas_vals['leftarcfrontal'] + 1
atlas_vals['rightarctemp'] = atlas_vals['leftarctemp'] + 1
# Multiply named regions:
atlas_vals['leftuncinatetemp'] = atlas_vals['leftilftemp'] =\
atlas_vals['leftanttemporal']
atlas_vals['rightuncinatetemp'] = atlas_vals['rightilftemp'] =\
atlas_vals['rightanttemporal']
atlas_vals['leftslfpar'] = atlas_vals['leftinfparietal']
atlas_vals['rightslfpar'] = atlas_vals['rightinfparietal']
atlas_vals['leftslffrontal'] = atlas_vals['leftarcfrontal']
atlas_vals['rightslffrontal'] = atlas_vals['rightarcfrontal']
# Bilateral regions:
atlas_vals['occipital'] = np.union1d(atlas_vals['leftoccipital'],
atlas_vals['rightoccipital'])
atlas_vals['temporal'] = np.union1d(atlas_vals['lefttemporal'],
atlas_vals['righttemporal'])
if isinstance(regions, str):
regions = [regions]
idxes = []
for region in regions:
region = region.lower() # Just to be sure
if region in atlas_vals.keys():
vol_idx = 0
vals = atlas_vals[region]
elif region == 'cstinferior':
vol_idx = 1
vals = np.array([1])
elif region == 'cstsuperior':
vol_idx = 2
vals = np.array([1])
elif region == 'leftcingpost':
vol_idx = 3
vals = np.array([1])
elif region == 'rightcingpost':
vol_idx = 4
vals = np.array([1])
# Broadcast vals, to test for equality over all three dimensions:
is_in = atlas[..., vol_idx] == vals[:, None, None, None]
# Then collapse the 4th dimension (each val), to get the 3D array:
is_in = np.sum(is_in, 0)
idxes.append(np.array(np.where(is_in)).T)
return np.concatenate(idxes, axis=0)
def bundles_to_aal(bundles, atlas=None):
"""
Given a sequence of AFQ bundle names, give back a sequence of lists
with [target0, target1] being each NX3 arrays of the endpoint indices
for the first and last node of the streamlines in this bundle.
"""
if atlas is None:
atlas = read_aal_atlas()['atlas']
endpoint_dict = {
"ATR_L": [['leftfrontal'], None],
"ATR_R": [['rightfrontal'], None],
"CST_L": [['cstinferior'], ['cstsuperior']],
"CST_R": [['cstinferior'], ['cstsuperior']],
"CGC_L": [['leftcingpost'], None],
"CGC_R": [['rightcingpost'], None],
"HCC_L": [None, None],
"HCC_R": [None, None],
"FP": [['rightoccipital'], ['leftoccipital']],
"FA": [['rightfrontal'], ['leftfrontal']],
"IFO_L": [['leftoccipital'], ['leftifoffront']],
"IFO_R": [['rightoccipital'], ['rightifoffront']],
"ILF_L": [['leftoccipital'], ['leftilftemp']],
"ILF_R": [['rightoccipital'], ['rightilftemp']],
"SLF_L": [['leftslffrontal'], ['leftinfparietal']],
"SLF_R": [['rightslffrontal'], ['rightinfparietal']],
"UNC_L": [['leftanttemporal'], ['leftuncinatefront']],
"UNC_R": [['rightanttemporal'], ['rightuncinatefront']],
"ARC_L": [['leftfrontal'], ['leftarctemp']],
"ARC_R": [['rightfrontal'], ['rightarctemp']],
"AntFrontal": [None, None],
"Motor": [None, None],
"Occipital": [None, None],
"Orbital": [None, None],
"PostParietal": [None, None],
"SupFrontal": [None, None],
"SupParietal": [None, None],
"Temporal": [None, None]}
targets = []
for bundle in bundles:
targets.append([])
if (endpoint_dict.get(bundle)):
for region in endpoint_dict[bundle]:
if region is None:
targets[-1].append(None)
else:
targets[-1].append(aal_to_regions(region, atlas=atlas))
else:
logger = logging.getLogger('AFQ.data')
logger.warning(f"Segmentation end points undefined for {bundle},"
+ " continuing without end points")
targets[-1] = [None, None]
return targets
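# A sketch tying aal_to_regions and bundles_to_aal together: both return voxel
# indices into the 4D "AAL and more" atlas array. Passing the data array
# explicitly (rather than the Nifti1Image) keeps the indexing unambiguous.
# Wrapped in a function so the atlas is not fetched at import time.
def _example_aal_targets():
    atlas_data = read_aal_atlas()['atlas'].get_fdata()
    frontal_idx = aal_to_regions('leftfrontal', atlas=atlas_data)       # N x 3 voxel indices
    cst_targets = bundles_to_aal(["CST_L", "CST_R"], atlas=atlas_data)  # [start, end] per bundle
    print(frontal_idx.shape, len(cst_targets))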
def s3fs_nifti_write(img, fname, fs=None):
"""
Write a nifti file straight to S3
    Parameters
    ----------
img : nib.Nifti1Image class instance
The image containing data to be written into S3
fname : string
Full path (including bucket name and extension) to the S3 location
where the file is to be saved.
fs : an s3fs.S3FileSystem class instance, optional
A file-system to refer to. Default to create a new file-system
"""
if fs is None:
fs = s3fs.S3FileSystem()
bio = BytesIO()
file_map = img.make_file_map({'image': bio, 'header': bio})
img.to_file_map(file_map)
data = gzip.compress(bio.getvalue())
with fs.open(fname, 'wb') as ff:
ff.write(data)
def s3fs_nifti_read(fname, fs=None, anon=False):
"""
Lazily reads a nifti image from S3.
    Parameters
    ----------
fname : string
Full path (including bucket name and extension) to the S3 location
of the file to be read.
fs : an s3fs.S3FileSystem class instance, optional
A file-system to refer to. Default to create a new file-system.
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
        resolver (client_kwargs, environment variables, config files,
        EC2 IAM server, in that order). Default: False
Returns
-------
nib.Nifti1Image class instance
Notes
-----
Because the image is lazily loaded, data stored in the file
is not transferred until `get_fdata` is called.
"""
if fs is None:
fs = s3fs.S3FileSystem(anon=anon)
with fs.open(fname) as ff:
zz = gzip.open(ff)
rr = zz.read()
bb = BytesIO(rr)
fh = nib.FileHolder(fileobj=bb)
img = nib.Nifti1Image.from_file_map({'header': fh, 'image': fh})
return img
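# A round-trip sketch for the two S3 NIfTI helpers above. The S3 path is a
# hypothetical placeholder and writing requires credentials with write access,
# so the sketch is wrapped in a function and not executed at import time.
def _example_s3fs_nifti_roundtrip():
    img = nib.Nifti1Image(np.zeros((4, 4, 4)), np.eye(4))
    fname = "my-bucket/some/prefix/zeros.nii.gz"  # assumed: writable S3 location
    s3fs_nifti_write(img, fname)
    img_back = s3fs_nifti_read(fname)
    print(img_back.shape)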
def write_json(fname, data):
"""
Write data to JSON file.
Parameters
----------
fname : str
Full path to the file to write.
data : dict
A dict containing the data to write.
Returns
-------
None
"""
with open(fname, 'w') as ff:
json.dump(data, ff, default=lambda obj: "Not Serializable")
def read_json(fname):
"""
Read data from a JSON file.
Parameters
----------
fname : str
Full path to the data-containing file
Returns
-------
dict
"""
    with open(fname, 'r') as ff:
out = json.load(ff)
return out
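# A small round-trip sketch for write_json/read_json, using a file under the
# AFQ home directory (any writable path would do). Not executed at import time.
def _example_json_roundtrip():
    os.makedirs(afq_home, exist_ok=True)
    fname = op.join(afq_home, "example_metadata.json")
    write_json(fname, {"subject": "sub-01", "n_streamlines": 1000})
    print(read_json(fname))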
def s3fs_json_read(fname, fs=None, anon=False):
"""
Reads json directly from S3
    Parameters
    ----------
fname : str
Full path (including bucket name and extension) to the file on S3.
fs : an s3fs.S3FileSystem class instance, optional
A file-system to refer to. Default to create a new file-system.
anon : bool
Whether to use anonymous connection (public buckets only).
If False, uses the key/secret given, or boto’s credential
        resolver (client_kwargs, environment variables, config files,
        EC2 IAM server, in that order). Default: False
"""
if fs is None:
fs = s3fs.S3FileSystem(anon=anon)
with fs.open(fname) as ff:
data = json.load(ff)
return data
def s3fs_json_write(data, fname, fs=None):
"""
Writes json from a dict directly into S3
Parameters
----------
data : dict
The json to be written out
fname : str
Full path (including bucket name and extension) to the file to
be written out on S3
fs : an s3fs.S3FileSystem class instance, optional
A file-system to refer to. Default to create a new file-system.
"""
if fs is None:
fs = s3fs.S3FileSystem()
with fs.open(fname, 'w') as ff:
json.dump(data, ff)
def _apply_mask(template_img, resolution=1):
"""
Helper function, gets MNI brain mask and applies it to template_img.
Parameters
----------
template_img : nib.Nifti1Image
Unmasked template
resolution : int, optional
Resolution of mask. Default: 1
Returns
-------
Masked template as nib.Nifti1Image
"""
mask_img = nib.load(str(tflow.get('MNI152NLin2009cAsym',
resolution=resolution,
desc='brain',
suffix='mask')))
template_data = template_img.get_fdata()
mask_data = mask_img.get_fdata()
if mask_data.shape != template_data.shape:
mask_img = nib.Nifti1Image(
resample(
mask_data,
template_data,
mask_img.affine,
template_img.affine).get_fdata(),
template_img.affine)
mask_data = mask_img.get_fdata()
out_data = template_data * mask_data
return nib.Nifti1Image(out_data, template_img.affine)
def read_mni_template(resolution=1, mask=True, weight="T2w"):
"""
Reads the MNI T1w or T2w template
Parameters
----------
resolution : int, optional.
Either 1 or 2, the resolution in mm of the voxels. Default: 1.
mask : bool, optional
Whether to mask the data with a brain-mask before returning the image.
Default : True
weight: str, optional
Which relaxation technique to use.
Should be either "T2w" or "T1w".
Default : "T2w"
Returns
-------
nib.Nifti1Image class instance
    containing the masked or unmasked T1w or T2w template.
"""
template_img = nib.load(str(tflow.get('MNI152NLin2009cAsym',
desc=None,
resolution=resolution,
suffix=weight,
extension='nii.gz')))
if not mask:
return template_img
else:
return _apply_mask(template_img, resolution)
fetch_biobank_templates = \
_make_fetcher(
"fetch_biobank_templates",
op.join(afq_home,
'biobank_templates'),
"http://biobank.ctsu.ox.ac.uk/showcase/showcase/docs/",
["bmri_group_means.zip"],
["bmri_group_means.zip"],
data_size="1.1 GB",
doc="Download UK Biobank templates",
unzip=True)
def read_ukbb_fa_template(mask=True):
"""
Reads the UK Biobank FA template
Parameters
----------
mask : bool, optional
Whether to mask the data with a brain-mask before returning the image.
Default : True
Returns
-------
nib.Nifti1Image class instance containing the FA template.
"""
fa_folder = op.join(
afq_home,
'biobank_templates',
'UKBiobank_BrainImaging_GroupMeanTemplates'
)
fa_path = op.join(
fa_folder,
'dti_FA.nii.gz'
)
if not op.exists(fa_path):
logger = logging.getLogger('AFQ.data')
logger.warning(
"Downloading brain MRI group mean statistics from UK Biobank. "
+ "This download is approximately 1.1 GB. "
+ "It is currently necessary to access the FA template.")
files, folder = fetch_biobank_templates()
# remove zip
for filename in files:
os.remove(op.join(folder, filename))
# remove non-FA related directories
for filename in os.listdir(fa_folder):
full_path = op.join(fa_folder, filename)
if full_path != fa_path:
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
template_img = nib.load(fa_path)
if not mask:
return template_img
else:
return _apply_mask(template_img, 1)
license: bsd-2-clause

repo_name: sinhrks/numpy
path: numpy/core/tests/test_multiarray.py
copies: 5
size: 220725
content:
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # itemsizes that are not a power of two are accessed bytewise
        # and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
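# Illustrative sketch, not exercised by the suite: a minimal recap of the
# assignment-broadcasting rules checked above. The helper name
# _sketch_assignment_broadcasting is hypothetical, not a NumPy API.
def _sketch_assignment_broadcasting():
    import numpy as np
    out = np.zeros((2, 3))
    out[...] = np.arange(3)                  # (3,) row broadcast over both rows
    assert out.tolist() == [[0, 1, 2], [0, 1, 2]]
    out[...] = np.arange(2).reshape(2, 1)    # (2, 1) column broadcast over columns
    assert out.tolist() == [[0, 0, 0], [1, 1, 1]]
    try:                                     # a (2, 2, 3) value cannot be reduced to (2, 3)
        out[...] = np.arange(12).reshape(2, 2, 3)
    except ValueError:
        pass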
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type, indicated by
raising a KeyError from __getitem__. At some point we may raise a warning
instead of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order the subarray axes still come last, as in every
# other case; they are not moved to the front as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
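# Illustrative sketch, not exercised by the suite: the subarray axes of a
# structured field always trail the array's own axes, so transposing the
# parent only permutes the outer axes. The helper name is hypothetical.
def _sketch_subarray_field_transpose():
    import numpy as np
    a = np.zeros((3, 5), dtype=[('a', 'i4', (2, 2))])
    a['a'] = np.arange(60).reshape(3, 5, 2, 2)
    assert a.T['a'].shape == (5, 3, 2, 2)            # the (2, 2) part stays trailing
    assert (a.T['a'] == a['a'].transpose(1, 0, 2, 3)).all()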
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
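# Illustrative sketch, not exercised by the suite: a compact summary of the
# structured-dtype casting levels asserted above, for the NumPy versions this
# suite targets (structured casting rules changed in later releases). The
# helper name is hypothetical.
def _sketch_structured_casting_levels():
    import numpy as np
    src = np.dtype([('a', '>i4'), ('b', '<f8')])
    assert np.can_cast(src, [('a', '<i4'), ('b', '>f8')], casting='equiv')      # byte order only
    assert np.can_cast(src, [('a', '<i8'), ('b', '>f8')], casting='safe')       # widen a field
    assert np.can_cast(src, [('a', '<i4'), ('b', '>f4')], casting='same_kind')  # narrow within a kind
    assert not np.can_cast(src, [('a', '<i4'), ('b', '>f4')], casting='safe')
    assert not np.can_cast(src, [('a', '>i4')], casting='same_kind')            # field set must match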
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that the broadcasting shape check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
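# Illustrative sketch, not exercised by the suite: np.sort places NaNs at the
# end of the array, which is why the NaN checks in test_sort above can compare
# the sorted output against the reversed input. The helper name is hypothetical.
def _sketch_nan_sort_order():
    import numpy as np
    s = np.sort(np.array([np.nan, 1.0, 0.0]))
    assert s[0] == 0.0 and s[1] == 1.0 and np.isnan(s[2])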
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using None is known fail at this point
# assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
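# Illustrative sketch, not exercised by the suite: the 'm' (mergesort) kind
# checked above is stable, so equal keys keep their original relative order.
# The helper name is hypothetical.
def _sketch_stable_argsort():
    import numpy as np
    keys = np.array([1, 0, 1, 0, 1])
    assert keys.argsort(kind='mergesort').tolist() == [1, 3, 0, 2, 4]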
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
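# Illustrative sketch, not exercised by the suite: the side='left'/'right'
# convention used throughout test_searchsorted. The helper name is hypothetical.
def _sketch_searchsorted_sides():
    import numpy as np
    a = np.array([1, 2, 2, 2, 3])
    assert a.searchsorted(2, side='left') == 1    # first position that keeps a sorted
    assert a.searchsorted(2, side='right') == 4   # one past the run of equal values
    b = np.array([0.0, 1.0, np.nan])              # NaN sorts (and searches) last
    assert b.searchsorted(np.nan, side='left') == 2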
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
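# Illustrative sketch, not exercised by the suite: `sorter` supplies the
# permutation (typically argsort) that would sort an otherwise unsorted array;
# the returned indices refer to that sorted order. The helper name is
# hypothetical.
def _sketch_searchsorted_sorter():
    import numpy as np
    a = np.array([30, 10, 20])
    s = a.argsort()                        # [1, 2, 0]
    assert (a[s] == np.sort(a)).all()
    assert a.searchsorted(20, sorter=s) == 1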
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
# wrap in list() so the shuffles actually run under Python 3's lazy map
list(map(np.random.shuffle, d1))
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
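# Illustrative sketch, not exercised by the suite: the only guarantee
# np.partition makes -- essentially what assert_partitioned verifies -- is
# that the kth element lands in its sorted position, with smaller elements
# before it and larger-or-equal elements after it, each side otherwise
# unordered. The helper name is hypothetical.
def _sketch_partition_invariant():
    import numpy as np
    d = np.array([9, 1, 8, 2, 7, 3])
    k = 2
    p = np.partition(d, k)
    assert p[k] == np.sort(d)[k]
    assert (p[:k] <= p[k]).all() and (p[k:] >= p[k]).all()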
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
# wrap in list() so the shuffles actually run under Python 3's lazy map
list(map(np.random.shuffle, d1))
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
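# Illustrative sketch, not exercised by the suite: in the NumPy versions this
# suite targets, diagonal() returns a read-only view, so writing through it is
# rejected. The helper name is hypothetical.
def _sketch_diagonal_readonly_view():
    import numpy as np
    d = np.eye(3).diagonal()
    assert not d.flags.owndata
    try:
        d[0] = 5.0
    except ValueError:
        pass                               # write rejected while the view is read-only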
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
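# Illustrative sketch, not exercised by the suite: put() indexes the array as
# if it were flattened in C order, which is why the 2-d case in test_put
# compares against tgt.reshape(2, 3). The helper name is hypothetical.
def _sketch_put_flat_indexing():
    import numpy as np
    a = np.zeros((2, 3), dtype=int)
    a.put([1, 3, 5], [1, 3, 5])            # flat positions 1, 3 and 5
    assert a.tolist() == [[0, 1, 0], [3, 0, 5]]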
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# General case of possible ravel that is not contiguous but
# works and includes a 1-sized axis with non matching stride
a = a.swapaxes(-1, -2) # swap back to C-order
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
a = a.T # swap all to Fortran order
assert_(np.may_share_memory(a.ravel(order='F'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
# Test negative strides:
a = np.arange(4)[::-1].reshape(2, 2)
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# Test keeporder with weirdly strided 1-sized dims (1-d first stride)
a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
strides = list(a.strides)
strides[0] = -12
strides[-1] = 0
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), a.ravel('C'))
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
#Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
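# Illustrative sketch, not exercised by the suite: the ravel orders used above.
# 'C' and 'F' force C/Fortran element order (copying if needed), while 'A' and
# 'K' follow the array's own memory layout. The helper name is hypothetical.
def _sketch_ravel_orders():
    import numpy as np
    a = np.array([[0, 1], [2, 3]], order='F')
    assert a.ravel('C').tolist() == [0, 1, 2, 3]
    assert a.ravel('F').tolist() == [0, 2, 1, 3]
    assert a.ravel('A').tolist() == [0, 2, 1, 3]   # follows the Fortran layout
    assert a.ravel('K').tolist() == [0, 2, 1, 3]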
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
        #     d = a.copy()  # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not have been reused for an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the Python stack, to
        # check that we were called directly from Python, is flawed: the object
        # may still be above the stack pointer, and we have no access to the top of it.
#
        # def incref_elide_l(l):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1s makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not have been reused for an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
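        # Each value below is (reflected method name, matching ufunc or None,
        # whether an in-place variant exists).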
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
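            # Build the dummy class dynamically so each operator under test
            # gets its own type (optionally an ndarray subclass).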
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
        # obj is first, so it should get to define the outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
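        # On Python 3, latin1 decoding lets the raw byte payload of
        # Python 2 pickles round-trip unchanged.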
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
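    # Each entry pairs an array-like with the expected argmax index. Float and
    # complex NaNs are expected to win; NaT values are ignored unless every
    # entry is NaT.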
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (older versions allowed even the cases above)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
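    # Mirror of the TestArgmax cases: (array-like, expected argmin index).
    # NaNs again take precedence, and NaTs are ignored unless every entry is NaT.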
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (older versions allowed even the cases above)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
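        # Exercise every scalar type in the group with both the native and the
        # swapped byte order.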
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
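        # np.lexsort sorts by the last key first: `a` is the primary key and
        # ties are broken by `b`.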
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() gives less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
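        # Parse the same text via fromstring and via fromfile and check both
        # against the expected values.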
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls on Windows. Without the workarounds, operations like this would normally hang.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
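        # a and b are read-only views (b is also discontiguous); a0 and b0 are
        # writeable views of the same data.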
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x.flat[9:], 0)  # the newly added elements are zero-filled
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
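        # Little-endian packing: bytes (1, 2, 3, 4) -> 0x04030201 == 67305985
        # and (5, 6, 7, 8) -> 0x08070605 == 134678021.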
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
        # this needs definition as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
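        # Identity used below: var(ddof=d) * (n - d) == var(ddof=0) * n, and
        # std(ddof=d) * sqrt(n - d) == std(ddof=0) * sqrt(n).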
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
        tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
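        # Repeated calls that pass an explicit `out` array must not leak
        # references to it.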
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
if sys.platform != 'darwin':
return
def aligned_array(shape, align, dtype, order='C'):
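            # Over-allocate a raw byte buffer, then slice it at the offset that
            # makes the data address a multiple of `align` before viewing it as
            # the requested dtype.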
            d = np.dtype(dtype)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
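    # The typecode string covers bool ('?'), the signed integers ('bhilq'),
    # the unsigned integers ('BHILQ'), the float precisions ('efdg') and the
    # complex precisions ('FDG').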
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaults or gives a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
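# Padding modes for the C-level neighborhood iterator tests below: 'zero' and
# 'one' pad with the constants 0 and 1, 'constant' pads with a caller-supplied
# fill value, 'circular' wraps around the array and 'mirror' reflects at the
# edges.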
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
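        # When the expected dtype ends in an unnamed ('') padding field,
        # np.dtype auto-names it, so blank the last name again to match what
        # _dtype_from_pep3118 produces.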
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
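        # VV(n) is an unnamed void blob whose size is n rounded up to the 'i'
        # alignment, i.e. the trailing padding expected in aligned mode.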
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
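        # Export obj through a memoryview and import it back two ways:
        # np.asarray must return a non-owning view and np.array an owning copy,
        # both preserving dtype, shape and contents.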
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
    def test_multiarray_writable_attributes_deletion(self):
        """ticket #2046, should not segfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = np.dtype('p').itemsize
        # builds a dtype from a comma-separated spec; the extra character
        # code '-' marks a missing pointer-sized ('p') field
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): # subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += np.dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return np.dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
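        # e.g. mtype('p,-,p') has pointer-sized fields f0 and f1 at offsets 0
        # and 2*psize, with an unnamed pointer-sized gap in between
        # (itemsize 3*psize).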
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
_view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
d2 = np.dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = np.dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
#test dtype with overlapping fields
overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
sniemi/SamPy
|
sandbox/src1/examples/colours.py
|
1
|
1307
|
#!/usr/bin/env python
"""
Some simple functions to generate colours.
"""
import numpy as npy
from matplotlib.colors import colorConverter
def pastel(colour, weight=2.4):
""" Convert colour into a nice pastel shade"""
rgb = npy.asarray(colorConverter.to_rgb(colour))
# scale colour
maxc = max(rgb)
if maxc < 1.0 and maxc > 0:
# scale colour
scale = 1.0 / maxc
rgb = rgb * scale
# now decrease saturation
total = rgb.sum()
slack = 0
for x in rgb:
slack += 1.0 - x
# want to increase weight from total to weight
# pick x s.t. slack * x == weight - total
# x = (weight - total) / slack
x = (weight - total) / slack
rgb = [c + (x * (1.0-c)) for c in rgb]
return rgb
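# Worked example (using matplotlib's named colour 'blue', rgb (0, 0, 1)):
# total = 1.0 and slack = 2.0, so x = (2.4 - 1.0) / 2.0 = 0.7 and the result
# is approximately [0.7, 0.7, 1.0].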
def get_colours(n):
""" Return n pastel colours. """
base = npy.asarray([[1,0,0], [0,1,0], [0,0,1]])
if n <= 3:
return base[0:n]
    # how many new colours do we need to insert between
# red and green and between green and blue?
    needed = (((n - 3) + 1) // 2, (n - 3) // 2)  # integer division so this also works under Python 3
colours = []
for start in (0, 1):
for x in npy.linspace(0, 1, needed[start]+2):
colours.append((base[start] * (1.0 - x)) +
(base[start+1] * x))
return [pastel(c) for c in colours[0:n]]
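# A small optional demo, not part of the original example: print a handful of
# generated pastel colours when the module is run directly.
if __name__ == '__main__':
    for colour in get_colours(5):
        print(colour)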
|
bsd-2-clause
|
chipmuenk/A2SRC
|
A2SRC/ui_bak/guiA2SRC.py
|
1
|
10143
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'guiA2SRC.ui'
#
# Created: Mon Jan 05 18:49:38 2015
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(733, 779)
self.matplotlibwidget = MatplotlibWidget(Dialog)
self.matplotlibwidget.setGeometry(QtCore.QRect(320, 40, 400, 300))
self.matplotlibwidget.setObjectName(_fromUtf8("matplotlibwidget"))
self.verticalLayoutWidget = QtGui.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 30, 271, 329))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.labelModulation = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelModulation.sizePolicy().hasHeightForWidth())
self.labelModulation.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.labelModulation.setFont(font)
self.labelModulation.setObjectName(_fromUtf8("labelModulation"))
self.verticalLayout.addWidget(self.labelModulation)
self.comboBoxModulationType = QtGui.QComboBox(self.verticalLayoutWidget)
self.comboBoxModulationType.setEditable(False)
self.comboBoxModulationType.setObjectName(_fromUtf8("comboBoxModulationType"))
self.comboBoxModulationType.addItem(_fromUtf8(""))
self.comboBoxModulationType.addItem(_fromUtf8(""))
self.comboBoxModulationType.addItem(_fromUtf8(""))
self.comboBoxModulationType.addItem(_fromUtf8(""))
self.comboBoxModulationType.addItem(_fromUtf8(""))
self.verticalLayout.addWidget(self.comboBoxModulationType)
self.comboBoxModulationPhase = QtGui.QComboBox(self.verticalLayoutWidget)
self.comboBoxModulationPhase.setObjectName(_fromUtf8("comboBoxModulationPhase"))
self.comboBoxModulationPhase.addItem(_fromUtf8(""))
self.comboBoxModulationPhase.addItem(_fromUtf8(""))
self.verticalLayout.addWidget(self.comboBoxModulationPhase)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lineEditModulationFreq = QtGui.QLineEdit(self.verticalLayoutWidget)
self.lineEditModulationFreq.setObjectName(_fromUtf8("lineEditModulationFreq"))
self.horizontalLayout.addWidget(self.lineEditModulationFreq)
self.labelModulationFreq = QtGui.QLabel(self.verticalLayoutWidget)
self.labelModulationFreq.setObjectName(_fromUtf8("labelModulationFreq"))
self.horizontalLayout.addWidget(self.labelModulationFreq)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lineEditModulationAmp = QtGui.QLineEdit(self.verticalLayoutWidget)
self.lineEditModulationAmp.setObjectName(_fromUtf8("lineEditModulationAmp"))
self.horizontalLayout_2.addWidget(self.lineEditModulationAmp)
self.labelModulationAmp = QtGui.QLabel(self.verticalLayoutWidget)
self.labelModulationAmp.setObjectName(_fromUtf8("labelModulationAmp"))
self.horizontalLayout_2.addWidget(self.labelModulationAmp)
self.verticalLayout.addLayout(self.horizontalLayout_2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.labelSynth = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelSynth.sizePolicy().hasHeightForWidth())
self.labelSynth.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.labelSynth.setFont(font)
self.labelSynth.setObjectName(_fromUtf8("labelSynth"))
self.verticalLayout_2.addWidget(self.labelSynth)
self.comboBoxSynthType = QtGui.QComboBox(self.verticalLayoutWidget)
self.comboBoxSynthType.setEditable(False)
self.comboBoxSynthType.setObjectName(_fromUtf8("comboBoxSynthType"))
self.comboBoxSynthType.addItem(_fromUtf8(""))
self.comboBoxSynthType.addItem(_fromUtf8(""))
self.comboBoxSynthType.addItem(_fromUtf8(""))
self.comboBoxSynthType.addItem(_fromUtf8(""))
self.comboBoxSynthType.addItem(_fromUtf8(""))
self.verticalLayout_2.addWidget(self.comboBoxSynthType)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.lineEditSynthFreq = QtGui.QLineEdit(self.verticalLayoutWidget)
self.lineEditSynthFreq.setObjectName(_fromUtf8("lineEditSynthFreq"))
self.horizontalLayout_3.addWidget(self.lineEditSynthFreq)
self.labelSynthFreq = QtGui.QLabel(self.verticalLayoutWidget)
self.labelSynthFreq.setObjectName(_fromUtf8("labelSynthFreq"))
self.horizontalLayout_3.addWidget(self.labelSynthFreq)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.lineEditSynthAmp = QtGui.QLineEdit(self.verticalLayoutWidget)
self.lineEditSynthAmp.setObjectName(_fromUtf8("lineEditSynthAmp"))
self.horizontalLayout_4.addWidget(self.lineEditSynthAmp)
self.labelSynthAmp = QtGui.QLabel(self.verticalLayoutWidget)
self.labelSynthAmp.setObjectName(_fromUtf8("labelSynthAmp"))
self.horizontalLayout_4.addWidget(self.labelSynthAmp)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.pushButton_2 = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.horizontalLayout_5.addWidget(self.pushButton_2)
self.pushButton = QtGui.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_5.addWidget(self.pushButton)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.labelModulation.setText(_translate("Dialog", "Jitter Modulation", None))
self.comboBoxModulationType.setItemText(0, _translate("Dialog", "None", None))
self.comboBoxModulationType.setItemText(1, _translate("Dialog", "Sinusoidal", None))
self.comboBoxModulationType.setItemText(2, _translate("Dialog", "Triangular", None))
self.comboBoxModulationType.setItemText(3, _translate("Dialog", "Rectangular", None))
self.comboBoxModulationType.setItemText(4, _translate("Dialog", "Random", None))
self.comboBoxModulationPhase.setItemText(0, _translate("Dialog", "R / L In-Phase Mod.", None))
self.comboBoxModulationPhase.setItemText(1, _translate("Dialog", "R / L Counter-Phase Mod.", None))
self.lineEditModulationFreq.setText(_translate("Dialog", "50", None))
self.labelModulationFreq.setText(_translate("Dialog", " Hz", None))
self.lineEditModulationAmp.setText(_translate("Dialog", "1e-3", None))
self.labelModulationAmp.setText(_translate("Dialog", " UI", None))
self.labelSynth.setText(_translate("Dialog", "Synthetic Signal", None))
self.comboBoxSynthType.setItemText(0, _translate("Dialog", "None, use WAV-File", None))
self.comboBoxSynthType.setItemText(1, _translate("Dialog", "Sinusoidal", None))
self.comboBoxSynthType.setItemText(2, _translate("Dialog", "Triangular", None))
self.comboBoxSynthType.setItemText(3, _translate("Dialog", "Rectangular", None))
self.comboBoxSynthType.setItemText(4, _translate("Dialog", "Random", None))
self.lineEditSynthFreq.setText(_translate("Dialog", "1000", None))
self.labelSynthFreq.setText(_translate("Dialog", " Hz", None))
self.lineEditSynthAmp.setText(_translate("Dialog", "50", None))
self.labelSynthAmp.setText(_translate("Dialog", "% FS", None))
self.pushButton_2.setText(_translate("Dialog", "Open File", None))
self.pushButton.setText(_translate("Dialog", "Start / Stop", None))
from matplotlibwidget import MatplotlibWidget
|
apache-2.0
|
garaud/ezhc
|
ezhc/series.py
|
2
|
3823
|
import pandas as pd
def series(df, options, *args, **kwargs):
idx = df.index
col = df.columns
data = df.values
assert(isinstance(idx, pd.core.index.Index))
series = []
for k, c in enumerate(col):
if df[c].dtype.kind in 'fib':
v = data[:, k]
sec = c in kwargs.get('secondary_y', [])
d = {
'name': c if not sec else c + ' (right)',
'yAxis': int(sec),
'data': [[idx[q], v[q]] for q in range(len(v))],
}
if c in kwargs.get('color', []):
d['color'] = kwargs['color'].get(c)
if kwargs.get('dashStyle', []):
d['dashStyle'] = kwargs['dashStyle'].get(c, 'Solid')
series.append(d)
return series
def series_drilldown(df, options, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape)==2)
assert(len(col)==1)
assert(df[col[0]].dtype.kind in 'if')
levone = list(idx.levels[0])
data = []
drilldownSeries = []
for c in levone:
dfs = df.xs(c)
ii = dfs.index.values.flatten()
vv = dfs.values.flatten()
d1 = {
'name': c,
'y': dfs.sum().values[0],
'drilldown': c if len(dfs)>1 else None,
}
data.append(d1)
if len(dfs)>1:
d2 = {
'name': c,
'data': [[str(ii[q]), vv[q]] for q in range(len(ii))],
'id': c,
}
drilldownSeries.append(d2)
series = [{'name': col[0],'data': data, 'colorByPoint': True}]
return series, drilldownSeries
def series_scatter(df, options, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape)==2)
assert(len(col)==1)
assert(df[col[0]].dtype.kind in 'iO')
data = df.values.flatten()
elmt = list(set(data))
color = kwargs.get('color', {})
series = []
for e in elmt:
dfs = df[df.iloc[:, 0]==e]
idx = dfs.index
series.append({'animation': False,
'name': e,
'color': color.get(e, None),
'data': [[idx[k][0], idx[k][1]] for k in range(len(dfs))],
})
return series
def series_bubble(df, options, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(len(idx.levshape)==3)
assert(len(col)==1)
assert(df[col[0]].dtype.kind in 'fib')
names = list(idx.levels[0])
color = kwargs.get('color', {})
series = []
for s in names:
dfs = df.xs(s)
v = dfs.values.flatten()
idxs = dfs.index
d = {
'name': s,
'data': [[idxs[q][0], idxs[q][1], v[q]] for q in range(len(v))],
}
if s in kwargs.get('color', []):
d['color'] = kwargs['color'].get(s)
series.append(d)
return series
def series_treemap(df, options, *args, **kwargs):
idx = df.index
col = df.columns
assert(isinstance(idx, pd.core.index.MultiIndex))
assert(df[col[0]].dtype.kind in 'fi')
data = df.values
elmt = list(set(data))
color = kwargs.get('color', {})
series = []
for e in elmt:
dfs = df[df.iloc[:, 0]==e]
idx = dfs.index
series.append({'animation': False,
'name': e,
'color': color.get(e, None),
'data': [[idx[k][0], idx[k][1]] for k in range(len(dfs))],
})
return series
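
# Hypothetical usage sketch (not part of ezhc): build Highcharts series dicts
# from a small numeric DataFrame under the old pandas API this module targets;
# the ``options`` argument is accepted but not inspected by series().
if __name__ == '__main__':
    _df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]},
                       index=[0, 1, 2])
    for _d in series(_df, {}, secondary_y=['b']):
        print(_d)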
|
mit
|
lin-credible/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
134
|
7452
|
"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components required to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text documents (TF-IDF word frequencies) dataset:
- for the digits dataset, 8x8 gray level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
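# Quick numeric check of the theoretical bound quoted in the docstring (added
# sketch, not from the original example): for an assumed n_samples = 10000 and
# eps = 0.1 the closed-form expression and the sklearn helper should agree.
_n_check, _eps_check = 10000, 0.1
_bound = 4 * np.log(_n_check) / (_eps_check ** 2 / 2 - _eps_check ** 3 / 3)
print("bound check: closed form %d vs helper %d"
      % (int(_bound), johnson_lindenstrauss_min_dim(_n_check, eps=_eps_check)))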
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
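    # a perpendicular offset of ``margin`` corresponds to a vertical offset of
    # sqrt(1 + a ** 2) * margin (equal to 1 / |w[1]|) for a line with slope a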
    yy_down = yy - np.sqrt(1 + a ** 2) * margin
    yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
RomainBrault/scikit-learn
|
sklearn/preprocessing/__init__.py
|
268
|
1319
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
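
# Minimal usage sketch (illustration only, not part of this module): standardize
# a small array with one of the scalers re-exported above.
if __name__ == '__main__':
    import numpy as np
    _X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    print(StandardScaler().fit_transform(_X))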
|
bsd-3-clause
|
procoder317/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
98
|
20870
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
    # increases as the number of samples decreases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
|
bsd-3-clause
|
DrXyzzy/smc
|
src/smc_sagews/smc_sagews/tests/conftest.py
|
5
|
27209
|
from __future__ import absolute_import
import pytest
import os
import re
import socket
import json
import signal
import struct
import hashlib
import time
import six
from datetime import datetime
# timeout for socket to sage_server in seconds
default_timeout = 20
###
# much of the code here is copied from sage_server.py
# cut and paste was done because it takes over 30 sec to import sage_server
# and requires the script to be run from sage -sh
###
def unicode8(s):
try:
if six.PY3:
return str(s, 'utf8')
else:
return str(s).encode('utf-8')
except:
try:
return str(s)
except:
return s
PID = os.getpid()
def log(*args):
mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
'%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x) for x in args]))
print(mesg)
def uuidsha1(data):
sha1sum = hashlib.sha1()
sha1sum.update(data)
s = sha1sum.hexdigest()
t = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
r = list(t)
j = 0
for i in range(len(t)):
if t[i] == 'x':
r[i] = s[j]
j += 1
elif t[i] == 'y':
# take 8 + low order 3 bits of hex number.
r[i] = hex((int(s[j], 16) & 0x3) | 0x8)[-1]
j += 1
return ''.join(r)
class ConnectionJSON(object):
def __init__(self, conn):
# avoid common mistake -- conn is supposed to be from socket.socket...
assert not isinstance(conn, ConnectionJSON)
self._conn = conn
def close(self):
self._conn.close()
def _send(self, s):
if six.PY3 and type(s) == str:
s = s.encode('utf8')
length_header = struct.pack(">L", len(s))
self._conn.send(length_header + s)
def send_json(self, m):
m = json.dumps(m)
#log(u"sending message '", truncate_text(m, 256), u"'")
self._send('j' + m)
return len(m)
def send_blob(self, blob):
s = uuidsha1(blob)
if six.PY3 and type(blob) == bytes:
# we convert all to bytes first, to avoid unnecessary conversions
self._send(('b' + s).encode('utf8') + blob)
else:
# old sage py2 code
self._send('b' + s + blob)
return s
def send_file(self, filename):
#log("sending file '%s'"%filename)
f = open(filename, 'rb')
data = f.read()
f.close()
return self.send_blob(data)
def _recv(self, n):
# see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
for i in range(20):
try:
r = self._conn.recv(n)
return r
except socket.error as exc:
if isinstance(exc, socket.timeout):
raise
else:
(errno, msg) = exc
if errno != 4:
raise
raise EOFError
def recv(self):
n = self._recv(4)
if len(n) < 4:
print(("expecting 4 byte header, got", n))
tries = 0
while tries < 5:
tries += 1
n2 = self._recv(4 - len(n))
n += n2
if len(n) >= 4:
break
else:
raise EOFError
n = struct.unpack('>L', n)[0] # big endian 32 bits
#print("test got header, expect message of length %s"%n)
s = self._recv(n)
while len(s) < n:
t = self._recv(n - len(s))
if len(t) == 0:
raise EOFError
s += t
if six.PY3 and type(s) == bytes:
s = s.decode('utf8')
mtyp = s[0]
if mtyp == 'j':
try:
return 'json', json.loads(s[1:])
except Exception as msg:
log("Unable to parse JSON '%s'" % s[1:])
raise
elif mtyp == 'b':
return 'blob', s[1:]
raise ValueError("unknown message type '%s'" % s[0])
def set_timeout(self, timeout):
"set socket timeout in seconds"
self._conn.settimeout(timeout)
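
# Wire-format sketch (illustration only, mirroring ConnectionJSON._send and
# send_json above): each message is a 4-byte big-endian length header followed
# by the payload, whose first character is 'j' for JSON or 'b' for a blob.
def frame_json_example(obj):
    body = 'j' + json.dumps(obj)
    if six.PY3:
        body = body.encode('utf8')
    return struct.pack(">L", len(body)) + body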
def truncate_text(s, max_size):
if len(s) > max_size:
return s[:max_size] + "[...]", True
else:
return s, False
class Message(object):
def _new(self, event, props={}):
m = {'event': event}
for key, val in list(props.items()):
if key != 'self':
m[key] = val
return m
def start_session(self):
return self._new('start_session')
def session_description(self, pid):
return self._new('session_description', {'pid': pid})
def send_signal(self, pid, signal=signal.SIGINT):
return self._new('send_signal', locals())
def terminate_session(self, done=True):
return self._new('terminate_session', locals())
def execute_code(self, id, code, preparse=True):
return self._new('execute_code', locals())
def execute_javascript(self, code, obj=None, coffeescript=False):
return self._new('execute_javascript', locals())
def output(
self,
id,
stdout=None,
stderr=None,
code=None,
html=None,
javascript=None,
coffeescript=None,
interact=None,
md=None,
tex=None,
d3=None,
file=None,
raw_input=None,
obj=None,
once=None,
hide=None,
show=None,
events=None,
clear=None,
delete_last=None,
done=False # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
):
m = self._new('output')
m['id'] = id
t = truncate_text_warn
did_truncate = False
import sage_server # we do this so that the user can customize the MAX's below.
if code is not None:
code['source'], did_truncate, tmsg = t(code['source'],
sage_server.MAX_CODE_SIZE,
'MAX_CODE_SIZE')
m['code'] = code
if stderr is not None and len(stderr) > 0:
m['stderr'], did_truncate, tmsg = t(stderr,
sage_server.MAX_STDERR_SIZE,
'MAX_STDERR_SIZE')
if stdout is not None and len(stdout) > 0:
m['stdout'], did_truncate, tmsg = t(stdout,
sage_server.MAX_STDOUT_SIZE,
'MAX_STDOUT_SIZE')
if html is not None and len(html) > 0:
m['html'], did_truncate, tmsg = t(html, sage_server.MAX_HTML_SIZE,
'MAX_HTML_SIZE')
if md is not None and len(md) > 0:
m['md'], did_truncate, tmsg = t(md, sage_server.MAX_MD_SIZE,
'MAX_MD_SIZE')
if tex is not None and len(tex) > 0:
tex['tex'], did_truncate, tmsg = t(tex['tex'],
sage_server.MAX_TEX_SIZE,
'MAX_TEX_SIZE')
m['tex'] = tex
if javascript is not None: m['javascript'] = javascript
if coffeescript is not None: m['coffeescript'] = coffeescript
if interact is not None: m['interact'] = interact
if d3 is not None: m['d3'] = d3
if obj is not None: m['obj'] = json.dumps(obj)
if file is not None: m['file'] = file # = {'filename':..., 'uuid':...}
if raw_input is not None: m['raw_input'] = raw_input
if done is not None: m['done'] = done
if once is not None: m['once'] = once
if hide is not None: m['hide'] = hide
if show is not None: m['show'] = show
if events is not None: m['events'] = events
if clear is not None: m['clear'] = clear
if delete_last is not None: m['delete_last'] = delete_last
if did_truncate:
if 'stderr' in m:
m['stderr'] += '\n' + tmsg
else:
m['stderr'] = '\n' + tmsg
return m
def introspect_completions(self, id, completions, target):
m = self._new('introspect_completions', locals())
m['id'] = id
return m
def introspect_docstring(self, id, docstring, target):
m = self._new('introspect_docstring', locals())
m['id'] = id
return m
def introspect_source_code(self, id, source_code, target):
m = self._new('introspect_source_code', locals())
m['id'] = id
return m
# NOTE: these functions are NOT in sage_server.py
def save_blob(self, sha1):
return self._new('save_blob', {'sha1': sha1})
def introspect(self, id, line, top):
return self._new('introspect', {'id': id, 'line': line, 'top': top})
message = Message()
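# Shape of the dicts produced by Message._new (illustration only):
# message.execute_code(id=0, code='2+2') returns
#     {'event': 'execute_code', 'id': 0, 'code': '2+2', 'preparse': True}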
###
# end of copy region
###
def set_salvus_path(self, id):
r"""
create json message to set path and file at start of virtual worksheet
"""
m = self._new('execute_code', locals())
# hard code SMC for now so we don't have to run with sage wrapper
SMC = os.path.join(os.environ["HOME"], ".smc")
default_log_file = os.path.join(SMC, "sage_server", "sage_server.log")
default_pid_file = os.path.join(SMC, "sage_server", "sage_server.pid")
def get_sage_server_info(log_file=default_log_file):
for loop_count in range(3):
# log file ~/.smc/sage_server/sage_server.log
# sample sage_server startup line in first lines of log:
# 3136 (2016-08-18 15:02:49.372): Sage server 127.0.0.1:44483
try:
with open(log_file, "r") as inf:
for lno in range(5):
line = inf.readline().strip()
m = re.search(
"Sage server (?P<host>[\w.]+):(?P<port>\d+)$", line)
if m:
host = m.group('host')
port = int(m.group('port'))
#return host, int(port)
break
else:
raise ValueError('Server info not found in log_file',
log_file)
break
except IOError:
print("starting new sage_server")
os.system(start_cmd())
time.sleep(5.0)
else:
pytest.fail(
"Unable to open log file %s\nThere is probably no sage server running. You either have to open a sage worksheet or run smc-sage-server start"
% log_file)
print(("got host %s port %s" % (host, port)))
return host, int(port)
secret_token = None
secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
if 'COCALC_SECRET_TOKEN' in os.environ:
secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def client_unlock_connection(sock):
secret_token = open(secret_token_path).read().strip()
sock.sendall(secret_token.encode())
def path_info():
file = __file__
full_path = os.path.abspath(file)
head, tail = os.path.split(full_path)
#file = head + "/testing.sagews"
return head, file
def recv_til_done(conn, test_id):
r"""
Discard json messages from server for current test_id until 'done' is True
or limit is reached. Used in finalizer for single cell tests.
"""
for loop_count in range(5):
typ, mesg = conn.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'done' in mesg
if mesg['done']:
break
else:
pytest.fail("too many responses for message id %s" % test_id)
def my_sage_startup():
"""
name of pytest SAGE_STARTUP_FILE
used in other test files so we export it
"""
return "a-init.sage"
def start_cmd(action='start'):
"""
launch sage-server with env setting for startup file
- `` action `` -- string "start" | "restart"
"""
pssf = os.path.join(os.path.dirname(__file__), my_sage_startup())
cmd = "export SAGE_STARTUP_FILE={};smc-sage-server {}".format(pssf, action)
return cmd
###
# Start of fixtures
###
@pytest.fixture(autouse=True, scope="session")
def sage_server_setup(pid_file=default_pid_file, log_file=default_log_file):
r"""
make sure sage_server pid file exists and process running at given pid
"""
os.system(start_cmd('restart'))
for loop_count in range(20):
time.sleep(0.5)
if not os.path.exists(log_file):
continue
lmsg = "Starting server listening for connections"
if lmsg in open(log_file).read():
break
else:
pytest.fail("Unable to start sage_server and setup log file")
return
@pytest.fixture()
def test_id(request):
r"""
Return increasing sequence of integers starting at 1. This number is used as
test id as well as message 'id' value so sage_server log can be matched
with pytest output.
"""
test_id.id += 1
return test_id.id
test_id.id = 1
# see http://doc.pytest.org/en/latest/tmpdir.html#the-tmpdir-factory-fixture
@pytest.fixture(scope='session')
def image_file(tmpdir_factory):
def make_img():
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
my_circle = plt.Circle((0.5, 0.5), 0.2)
fig, ax = plt.subplots()
ax.add_artist(my_circle)
return fig
fn = tmpdir_factory.mktemp('data').join('my_circle.png')
make_img().savefig(str(fn))
return fn
@pytest.fixture(scope='session')
def data_path(tmpdir_factory):
path = tmpdir_factory.mktemp("data")
path.ensure_dir()
return path
@pytest.fixture()
def execdoc(request, sagews, test_id):
r"""
Fixture function execdoc. Depends on two other fixtures, sagews and test_id.
EXAMPLES:
::
def test_assg(execdoc):
execdoc("random?")
"""
def execfn(code, pattern='Docstring'):
m = message.execute_code(code=code, id=test_id)
sagews.send_json(m)
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'code' in mesg
assert 'source' in mesg['code']
assert re.sub('\s+', '', pattern) in re.sub('\s+', '',
mesg['code']['source'])
def fin():
recv_til_done(sagews, test_id)
request.addfinalizer(fin)
return execfn
@pytest.fixture()
def exec2(request, sagews, test_id):
r"""
Fixture function exec2. Depends on two other fixtures, sagews and test_id.
If output & patterns are omitted, the cell is not expected to produce a
stdout result. All arguments after 'code' are optional.
If argument `timeout` is provided, the default socket timeout
for connection to sage_server will be overridden to the value of `timeout` in seconds.
- `` code `` -- string of code to run
- `` output `` -- string or list of strings of output to be matched up to leading & trailing whitespace
- `` pattern `` -- regex to match with expected stdout output
- `` html_pattern `` -- regex to match with expected html output
- `` timeout `` -- socket timeout in seconds
- `` errout `` -- stderr substring to be matched. stderr may come as several messages
EXAMPLES:
::
def test_assg(exec2):
code = "x = 42\nx\n"
output = "42\n"
exec2(code, output)
::
def test_set_file_env(exec2):
code = "os.chdir(salvus.data[\'path\']);__file__=salvus.data[\'file\']"
exec2(code)
::
def test_sh(exec2):
exec2("sh('date +%Y-%m-%d')", pattern = '^\d{4}-\d{2}-\d{2}$')
.. NOTE::
If `output` is a list of strings, `pattern` and `html_pattern` are ignored
"""
def execfn(code,
output=None,
pattern=None,
html_pattern=None,
timeout=default_timeout,
errout=None):
m = message.execute_code(code=code, id=test_id)
m['preparse'] = True
if timeout is not None:
print(('overriding socket timeout to {}'.format(timeout)))
sagews.set_timeout(timeout)
# send block of code to be executed
sagews.send_json(m)
# check stdout
if isinstance(output, list):
for o in output:
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'stdout' in mesg
assert o.strip() in (mesg['stdout']).strip()
elif output or pattern:
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'stdout' in mesg
mout = mesg['stdout']
if output is not None:
assert output.strip() in mout
elif pattern is not None:
assert re.search(pattern, mout) is not None
elif html_pattern:
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'html' in mesg
assert re.search(html_pattern, mesg['html']) is not None
elif errout:
mout = ""
while True:
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'stderr' in mesg
mout += mesg['stderr']
if errout.strip() in mout:
break
def fin():
recv_til_done(sagews, test_id)
request.addfinalizer(fin)
return execfn
@pytest.fixture()
def execbuf(request, sagews, test_id):
r"""
Fixture function execbuf.
Inner function will execute code, then append messages received
from sage_server.
As messages are appended, the result is checked for either
an exact match, if `output` string is specified, or
pattern match, if `pattern` string is given.
Test fails if non-`stdout` message is received before
match or receive times out.
"""
def execfn(code, output=None, pattern=None):
m = message.execute_code(code=code, id=test_id)
m['preparse'] = True
# send block of code to be executed
sagews.send_json(m)
outbuf = ''
while True:
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'stdout' in mesg
outbuf += mesg['stdout']
if output is not None:
if output in outbuf:
break
elif pattern is not None:
if re.search(pattern, outbuf) is not None:
break
def fin():
recv_til_done(sagews, test_id)
request.addfinalizer(fin)
return execfn
@pytest.fixture()
def execinteract(request, sagews, test_id):
def execfn(code):
m = message.execute_code(code=code, id=test_id)
m['preparse'] = True
sagews.send_json(m)
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert 'interact' in mesg
def fin():
recv_til_done(sagews, test_id)
request.addfinalizer(fin)
return execfn
@pytest.fixture()
def execblob(request, sagews, test_id):
def execblobfn(code,
want_html=True,
want_javascript=False,
file_type='png',
ignore_stdout=False):
r"""
        Fixture used when a test generates an image.
INPUT:
- ``file_type`` -- string or list of strings, e.g. ["svg", "png"]
"""
SHA_LEN = 36
# format and send the plot command
m = message.execute_code(code=code, id=test_id)
sagews.send_json(m)
# expect several responses before "done", but order may vary
want_blob = True
want_name = True
while any([want_blob, want_name, want_html, want_javascript]):
typ, mesg = sagews.recv()
if typ == 'blob':
assert want_blob
want_blob = False
# when a blob is sent, the first 36 bytes are the sha1 uuid
print(("blob len %s" % len(mesg)))
file_uuid = mesg[:SHA_LEN].decode()
assert file_uuid == uuidsha1(mesg[SHA_LEN:])
# sage_server expects an ack with the right uuid
m = message.save_blob(sha1=file_uuid)
sagews.send_json(m)
else:
assert typ == 'json'
if 'html' in mesg:
assert want_html
want_html = False
print('got html')
elif 'javascript' in mesg:
assert want_javascript
want_javascript = False
print('got javascript')
elif ignore_stdout and 'stdout' in mesg:
pass
else:
assert want_name
want_name = False
assert 'file' in mesg
print('got file name')
if isinstance(file_type, str):
assert file_type in mesg['file']['filename']
elif isinstance(file_type, list):
assert any([(f0 in mesg['file']['filename']) for f0 in file_type]), \
"missing one of file types {} in response from sage_server".format(file_type)
else:
assert 0
# final response is json "done" message
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['done'] == True
return execblobfn
@pytest.fixture()
def execintrospect(request, sagews, test_id):
def execfn(line, completions, target, top=None):
if top is None:
top = line
m = message.introspect(test_id, line=line, top=top)
m['preparse'] = True
sagews.send_json(m)
typ, mesg = sagews.recv()
assert typ == 'json'
assert mesg['id'] == test_id
assert mesg['event'] == "introspect_completions"
assert mesg['completions'] == completions
assert mesg['target'] == target
return execfn
@pytest.fixture(scope="class")
def sagews(request):
r"""
    Class-scoped fixture for tests that don't leave
    extra threads running.
"""
# setup connection to sage_server TCP listener
host, port = get_sage_server_info()
print(("host %s port %s" % (host, port)))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
sock.settimeout(default_timeout)
print("connected to socket")
# unlock
client_unlock_connection(sock)
print("socket unlocked")
conn = ConnectionJSON(sock)
c_ack = conn._recv(1).decode()
assert c_ack == 'y', "expect ack for token, got %s" % c_ack
# open connection with sage_server and run tests
msg = message.start_session()
msg['type'] = 'sage'
conn.send_json(msg)
print("start_session sent")
typ, mesg = conn.recv()
assert typ == 'json'
pid = mesg['pid']
print(("sage_server PID = %s" % pid))
# teardown needed - terminate session nicely
# use yield instead of request.addfinalizer in newer versions of pytest
def fin():
print("\nExiting Sage client.")
conn.send_json(message.terminate_session())
# wait several seconds for client to die
for loop_count in range(8):
try:
os.kill(pid, 0)
except OSError:
# client is dead
break
time.sleep(0.5)
else:
print(("sending sigterm to %s" % pid))
try:
os.kill(pid, signal.SIGTERM)
except OSError:
pass
request.addfinalizer(fin)
return conn
@pytest.fixture(scope="class")
def own_sage_server(request):
assert os.geteuid() != 0, "Do not run as root, will kill all sage_servers."
print("starting sage_server class fixture")
os.system(start_cmd())
time.sleep(0.5)
def fin():
print("killing all sage_server processes")
os.system("pkill -f sage_server_command_line")
request.addfinalizer(fin)
@pytest.fixture(scope="class")
def test_ro_data_dir(request):
"""
Return the directory containing the test file.
Used for tests which have read-only data files in the test dir.
"""
return os.path.dirname(request.module.__file__)
#
# Write machine-readable report files into the $HOME directory
# http://doc.pytest.org/en/latest/example/simple.html#post-process-test-reports-failures
#
import os
report_json = os.path.expanduser('~/sagews-test-report.json')
report_prom = os.path.expanduser('~/sagews-test-report.prom')
results = []
start_time = None
@pytest.hookimpl
def pytest_configure(config):
global start_time
start_time = datetime.utcnow()
@pytest.hookimpl
def pytest_unconfigure(config):
global start_time
data = {
'name': 'smc_sagews.test',
'version': 1,
'start': str(start_time),
'end': str(datetime.utcnow()),
'fields': ['name', 'outcome', 'duration'],
'results': results,
}
with open(report_json, 'w') as out:
json.dump(data, out, indent=1)
# this is a plain text prometheus report
# https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details
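    # each line looks like: sagews_test{name="basic",outcome="passed"} 0.01 1471532569000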
# timestamp milliseconds since epoch
ts = int(1000 * time.mktime(start_time.timetuple()))
# first write to temp file ...
report_prom_tmp = report_prom + '~'
with open(report_prom_tmp, 'w') as prom:
for (name, outcome, duration) in results:
labels = 'name="{name}",outcome="{outcome}"'.format(**locals())
line = 'sagews_test{{{labels}}} {duration} {ts}'.format(**locals())
prom.write(line + '\n')
# ... then atomically overwrite the real one
os.rename(report_prom_tmp, report_prom)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
if rep.when != "call":
return
#import pdb; pdb.set_trace() # uncomment to inspect item and rep objects
# the following `res` should match the `fields` above
# parent: item.parent.name could be interesting, but just () for auto discovery
name = item.name
test_ = 'test_'
if name.startswith(test_):
name = name[len(test_):]
res = [name, rep.outcome, rep.duration]
results.append(res)
|
agpl-3.0
|
andipeng/MagnePlane
|
src/hyperloop/Python/ticket_cost.py
|
4
|
8796
|
from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp
import matplotlib.pylab as plt
class TicketCost(Component):
'''
Notes
-------
    This Component takes into account various cost figures from the system model and combines them to estimate ticket cost per passenger.
Params
-------
    land_cost : float
        Cost of materials over land per unit length. Default value is 2.437e6 USD/km
    water_cost : float
        Cost of materials underwater per unit length. Default value is 389.346941e3 USD/km
pod_cost : float
Cost per individual pod. Default value is 1.0e6 USD.
capital_cost : float
Estimate of overhead capital cost. Default value is 1.0e10 USD.
energy_cost : float
Cost of electricity. Default value is .13 USD/kWh
ib : float
Bond interest rate. Default value is .04
bm : float
Bond maturity. Default value is 20.0 years.
operating_time : float
operating time per day. Default value is 16.0*3600 s
JtokWh : float
        Conversion factor from J to kWh. Default value is 2.7778e-7 (kW*h)/J
m_pod : float
Pod mass. Default value is 3100 kg
n_passengers : float
Number of passengers. Default value is 28.0
pod_period : float
Time in between pod departures. Default value is 120.0 s
avg_speed : float
average pod speed. Default value is 286.86 m/s
track_length : float
length of the track. Default value is 600e3 m
pod_power : float
Power consumption of the pod. Default value is 1.5e6 W
prop_power : float
power of an individual propulsion section. Default value is 350e3 W
vac_power : float
Power of the vacuum pumps. Default value is 71.049e6 W
alpha : float
percent of vacuum power used in steady state. Default value is .0001
vf : float
Pod top speed. Default value is 286.86 m/s
g : float
Gravity. Default value is 9.81 m/s/s
Cd : float
Pod drag coefficient. Default value is .2
S : float
Pod planform area. Default value is 40.42 m**2
p_tunnel : float
Tunnel pressure. Default value is 850.0 Pa
T_tunnel : float
Tunnel temperature. Default value is 320 K
R : float
Ideal gas constant. Default value is 287 J/kg/K
eta : float
        Efficiency of propulsion system. Default value is .8
D_mag : float
Magnetic drag. Default value is (9.81*3100.0)/200.0 N
thrust_time : float
Time spent during a propulsive section. Default value is 1.5 s
prop_period : float
        Distance between propulsion sections. Default value is 25.0e3 m
Returns
-------
ticket_cost : float
cost of individual ticket. Default value is 0.0 USD
prop_energy_cost : float
cost of energy used by propulsion section per year. Default value is 0.0 USD
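    Examples
    --------
    Rough hand check of the pod-count arithmetic in solve_nonlinear (added
    illustration, not from the original source): with the default pod_period
    of 120 s, track_length of 600e3 m and avg_speed of 286.86 m/s, a one-way
    trip takes about 2092 s, so num_pods = ceil(2092 * (1/120)) = 18.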
'''
def __init__(self):
super(TicketCost, self).__init__()
self.add_param('land_cost', val = 2.437e6, desc = 'Cost of materials over land per unit length', units = 'USD/km')
self.add_param('water_cost', val = 389.346941e3, desc = 'Cost of materials underwater per unit length', units = 'USD/km')
self.add_param('pod_cost', val = 1.0e6, desc = 'Cost of individual pod', units = 'USD')
self.add_param('capital_cost', val = 1.0e10, desc = 'Estimate of overhead capital cost', units = 'USD')
self.add_param('energy_cost', val = .13, desc = 'Cost of electricity', units = 'USD/kW/h')
self.add_param('ib', val = .04, desc = 'Bond interest rate', units = 'unitless')
self.add_param('bm', val = 20.0, desc = 'Bond maturity', units = 'yr')
self.add_param('operating_time', val = 16.0*3600, desc = 'Operating time per day', units = 's')
self.add_param('JtokWh', val = 2.7778e-7, desc = 'Convert Joules to kWh', units = '(kw*h)/J')
self.add_param('m_pod', val = 3100.0, desc = 'Pod Mass', units = 'kg')
self.add_param('n_passengers', val = 28.0, desc = 'number of passengers', units = 'unitless')
self.add_param('pod_period', val = 120.0, desc = 'Time in between departures', units = 's')
self.add_param('avg_speed', val = 286.86, desc = 'Average Pod Speed', units = 'm/s')
self.add_param('track_length', val = 600.0e3, desc = 'Track Length', units = 'm')
self.add_param('land_length', val = 600e3, desc = 'Length traveled over land', units = 'm')
self.add_param('water_length', val = 0.0e3, desc = 'Length traveled underwater', units = 'm')
self.add_param('pod_power', val = 1.5e6, desc = 'Power required by pod motor', units = 'W')
self.add_param('prop_power', val = 350.0e3, desc = 'Power of single propulsive section', units = 'W')
self.add_param('vac_power', val = 71.049e6, desc = 'Power of vacuums', units = 'W')
self.add_param('steady_vac_power', val = 950.0e3, desc = 'Steady State run power of vacuum pumps', units = 'W')
self.add_param('vf', val = 286.86, desc = 'Pod top speed', units = 'm/s')
self.add_param('g', val = 9.81, desc = 'Gravity', units = 'm/s/s')
self.add_param('Cd', val = .2, desc = 'Pod drag coefficient', units = 'unitless')
self.add_param('S', val = 40.42, desc = 'Pod planform area', units = 'm**2')
self.add_param('p_tunnel', val = 850.0, desc = 'Tunnel Pressure', units = 'Pa')
self.add_param('T_tunnel', val = 320.0, desc = 'Tunnel Temperature', units = 'K')
self.add_param('R', val = 287.0, desc = 'Ideal gas constant', units = 'J/kg/K')
self.add_param('eta', val = .8, desc = 'Propulsive efficiency', units = 'unitless')
self.add_param('D_mag', val = (9.81*3100.0)/200.0, desc = 'Magnetic Drag', units = 'N')
self.add_param('thrust_time', val = 1.5, desc = 'Time that pod is over propulsive section', units = 's')
self.add_param('prop_period', val = 25.0e3, desc = 'distance between propulsive sections', units = 'm')
self.add_param('num_thrust', val = 10.0, desc = 'Number of booster sections along track', units = 'unitless')
self.add_output('num_pods', val = 0.0, desc = 'Number of Pods', units = 'unitless')
self.add_output('ticket_cost', val = 0.0, desc = 'Ticket cost', units = 'USD')
self.add_output('prop_energy_cost', val = 0.0, desc = 'Cost of propulsion energy', units = 'USD')
self.add_output('tube_energy_cost', val = 0.0, desc = 'Cost of tube energy', units = 'USD')
self.add_output('total_energy_cost', val = 0.0, desc = 'Cost of energy consumpition per year', units = 'USD')
def solve_nonlinear(self, p, u,r):
land_cost = p['land_cost']
water_cost = p['water_cost']
pod_cost= p['pod_cost']
capital_cost = p['capital_cost']
energy_cost = p['energy_cost']
ib = p['ib']
bm = p['bm']
operating_time = p['operating_time']
JtokWh = p['JtokWh']
m_pod = p['m_pod']
n_passengers = p['n_passengers']
pod_period = p['pod_period']
avg_speed = p['avg_speed']
track_length = p['track_length']
land_length = p['land_length']
water_length = p['water_length']
pod_power = -1.0*p['pod_power']
prop_power = p['prop_power']
vac_power = p['vac_power']
steady_vac_power = -1.0*p['steady_vac_power']
vf = p['vf']
g = p['g']
Cd = p['Cd']
S = p['S']
p_tunnel = p['p_tunnel']
T_tunnel = p['T_tunnel']
R = p['R']
eta = p['eta']
D_mag = p['D_mag']
thrust_time = p['thrust_time']
prop_period = p['prop_period']
num_thrust = p['num_thrust']
length_cost = ((water_length/track_length)*water_cost) + ((land_length/track_length)*land_cost)
pod_frequency = 1.0/pod_period
num_pods = np.ceil((track_length/avg_speed)*pod_frequency)
flights_per_pod = (operating_time*pod_frequency)/num_pods
energy_per_flight = pod_power*(track_length/avg_speed)*.9
pod_energy = energy_per_flight*flights_per_pod*num_pods*JtokWh
vac_energy = steady_vac_power*operating_time*JtokWh
rho = p_tunnel/(R*T_tunnel)
start_distance = (vf**2)/(2*g)
start_energy = ((m_pod*g+D_mag)*start_distance + (.5*Cd*rho*g*S*(start_distance**2)))/eta
prop_energy = (num_thrust*thrust_time*prop_power + start_energy)*flights_per_pod*num_pods*JtokWh
tube_energy = prop_energy + vac_energy
u['num_pods'] = num_pods
u['prop_energy_cost'] = prop_energy*energy_cost*365
u['tube_energy_cost'] = tube_energy*energy_cost*365
u['total_energy_cost'] = (pod_energy+tube_energy)*energy_cost*365
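# Ticket price spreads the one-time costs (track materials, pod fleet, and
# capital scaled by one period of bond interest) plus one year of energy cost
# over every ticket sold during the bond maturity: passengers per departure *
# departure frequency [1/s] * seconds per year * bm years.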
u['ticket_cost'] = (length_cost*(track_length/1000.0) + pod_cost*num_pods + capital_cost*(1.0+ib) + \
energy_cost*(tube_energy + pod_energy)*365.0)/(n_passengers*pod_frequency*bm*365.0*24.0*3600.0)
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (('n_passengers', 28.0),
('track_length', 600.0e3, {'units' : 'm'}))
root.add('p', TicketCost())
root.add('des_vars', IndepVarComp(params), promotes = ['n_passengers'])
root.connect('n_passengers', 'p.n_passengers')
root.connect('des_vars.track_length', 'p.track_length')
top.setup()
top.run()
print(top['p.ticket_cost'])
# n_passengers = np.linspace(10,100,num = 90)
# ticket_cost = np.zeros((1, len(n_passengers)))
# for i in range(len(n_passengers)):
# top['n_passengers'] = n_passengers[i]
# top.run()
# ticket_cost[0, i] = top['p.ticket_cost']
# plt.plot(n_passengers*(175200.0/(1.0e6)), ticket_cost[0,:])
# plt.show()
|
apache-2.0
|
Foucl/mymu-psychopy
|
docs/mymu.py
|
1
|
16983
|
# -*- coding: utf-8 -*-
# -- ==mymu_imports== --
#%% mymu-imports
import sys
sys.dont_write_bytecode = False
import numpy.random as rnd
import numpy as np
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
import os
import glob
from collections import OrderedDict
import psychopy
reload(psychopy)
from psychopy import visual, data, core, event, logging
import psychopy_ext
reload(psychopy_ext)
from psychopy_ext import exp
from pyglet import app
import scripts.computer as computer
#reload(computer)
reload(exp)
from IPython.core.debugger import Tracer
# for testing - psychopy's moviestim doesn't seem to take care of files already opened
#import win32file
#win32file._setmaxstdio(2048)
# -- ==mymu_imports== --
# -- ==mymu_class== --
#%%
PATHS = exp.set_paths('mymu', computer)
PATHS['vid'] = './vid/'
PATHS['img'] = './img/'
try:
del(thismu)
del(MyMu)
except:
pass
class MyMu(exp.Experiment):
"""
Willkommen zu unserem Experiment
================================
Drücken Sie eine **beliebige Taste**, um zu starten.
"""
def __init__(self,
name='mymu',
version='0.9',
info=OrderedDict([('subjid', 'mm_'),
('session', 1),
]),
rp=OrderedDict([ # these control how the experiment is run
('no_output', False), # do you want output? or just playing around?
('debug', False), # not fullscreen presentation etc
('autorun', 0), # if >0, will autorun at the specified speed
('log_level', 40)
]),
actions='run'
):
super(MyMu, self).__init__(name=name, info=info,
rp=rp, actions=actions,
paths=PATHS, computer=computer, version=version)#, blockcol='block')
# user-defined parameters
self.nvalid = 60
self.ninvalid = 20
self.stimsize = (9, 6) # in deg
self.isi = 1.2
ids = {}
self.out_movie_frames = []
self.all_stims = [x for x in os.listdir(self.paths["vid"])]
male = [x for x in self.all_stims if x[5] == 'm']
female = [x for x in self.all_stims if x[5] == 'w']
happy = [x for x in self.all_stims if x[0:3] == 'fre']
disgusted = [x for x in self.all_stims if x[0:3] == 'ekl']
self.ids = {}
self.ids['male'] = list(set([x[5:8] for x in male]))
self.ids['female'] = list(set([x[5:8] for x in female]))
self.factors = {'expression': ['disgusted', 'happy'], 'validity': ['valid', 'invalid']}
self.conditions=[
OrderedDict({'texpr':'ekl', 'validity': 'valid', 'pexpr': 'ekl', 'weight': self.nvalid}),
OrderedDict({'texpr':'ekl', 'validity': 'invalid', 'pexpr': 'fre', 'weight': self.ninvalid}),
OrderedDict({'texpr':'fre', 'validity': 'valid', 'pexpr': 'fre', 'weight': self.nvalid}),
OrderedDict({'texpr':'fre', 'validity': 'invalid', 'pexpr': 'ekl', 'weight': self.ninvalid})
]
def create_stimuli(self):
"""Define your stimuli here, store them in self.s
"""
testmov = os.path.join(self.paths["vid"], 'ekl99m03.avi')
self.set_logging(level=self.rp['log_level'])
self.all_movie_frames = []
self.all_mov = {}
for stim in self.all_stims:
if self.rp['dict']:
mov = visual.MovieStim3(self.win, filename=os.path.join(self.paths["vid"], stim), size=(511, 768), name='PrimeVid', noAudio=True, autoLog=True)
else:
mov = ''
self.all_mov[stim] = mov
self.create_fixation()
self.s = {}
self.s['fix']= self.fixation
self.s['mov'] = visual.MovieStim3(self.win, filename=testmov, size=(511, 768), name='PrimeVid', noAudio=True, autoLog=True)
self.s['blank'] = visual.ImageStim(self.win, size=(0,0), autoLog=True)
self.s['target'] = visual.ImageStim(self.win, size=self.stimsize, name='TargetPic', autoLog=True)
self.win.setRecordFrameIntervals(False)
#self.win.waitBlanking = False
def create_win(self, *args, **kwargs):
super(MyMu, self).create_win(units='deg', color=(100,100,100),
*args, **kwargs)
#app.run()
def make_trials(self):
self.trial_list = []
ids = self.ids
for c in self.conditions:
# TODO: make it possible to just set nTrials and propValid!! TDD!
idtmp = []
stimftmp = []
if c['validity'] == 'valid':
idtmp = ids['male'][:] + list(rnd.choice(ids['male'], 10)) + idtmp + ids['female'][:] + list(rnd.choice(ids['female'], 11))
stimftmp = [c['pexpr'] + '99' + x + '.avi' for x in idtmp]
elif c['validity'] == 'invalid':
idtmp = list(rnd.choice(ids['male'], 10)) + list(rnd.choice(ids['male'], 10))
stimftmp = [c['pexpr'] + '99' + x + '.avi' for x in idtmp]
for stim in stimftmp:
self.trial_list.extend([OrderedDict({'texpr': c['texpr'], 'validity': c['validity'], 'pexpr': c['pexpr'], 'stimf': stim, 'mov':self.all_mov[stim], 'sex': stim[3], 'id': stim[4:6]})])
#rnd.shuffle(self.trial_list) not necessary, psychopy_ext takes care of shuffling!
#return trial_list
def save_frame(self, n_rep=None, buffer='front', name=None):
# TODO: only check for save_movie here
if not name:
name = self.this_event.name
if not n_rep:
n_rep = int(self.this_event.dur * 30)
if n_rep < 1:
n_rep = 1
im = self.win._getFrame(buffer)
self.all_movie_frames.append((n_rep, im, name))
# overwrite at least idle_event for movie-creation
def before_event(self, *args, **kwargs):
super(MyMu, self).before_event(*args, **kwargs)
if self.rp['save_mov'] :
#self.win.getMovieFrame(buffer='front')
self.save_frame()
def post_trial(self, *args, **kwargs):
super(MyMu, self).post_trial(*args, **kwargs)
if self.rp['save_mov'] and self.thisTrialN == 2:
#self.win.saveMovieFrames('./exp_run_t1-7.mp4') # TODO: create custom function
self.save_movie_frames()
#for frame in self.all_movie_frames:
# print frame[0], frame[1], frame[2]
self.quit(message='movie of first three trials saved, nothing to do anymore')
def save_movie_frames(self, filen='./exp_t1-7.mp4'):
frames = self.all_movie_frames
# seems super easy, copied from psychopy.visual.Window.saveMovieFrames
from moviepy.editor import ImageSequenceClip
numpyFrames = []
n = 0
i = 0
for n_rep, frame, name in frames: # BUT: now I have to bite the dust and create many frames? can this be done more efficiently?
if name == 'prime_rest':
n += 1
elif name == 'isi':
print "saving prime_rest", n, "times"
n = 0
print "saving isi", n_rep, "times"
else:
print "saving", name, n_rep, "times"
for _ in xrange(n_rep):
numpyFrames.append(np.array(frame))
clip = ImageSequenceClip(numpyFrames, fps=30)
clip.write_videofile(filen, codec='libx264')
self.all_movie_frames = []
def play_simple(self, *args, **kwargs):
n_rep_first = 22
ut = self.rp['unittest']
if ut:
n_rep_first = 2
show_ft = False
self.s['mov'].draw()
# save a lot of memory by just saving the backbuffer with n_rep = n_rep_first/2!
self.save_frame(n_rep=(n_rep_first+2)/2,buffer='back', name='prime_neutral')
self.s['mov'].seek(0.0)
self.s['mov'].pause()
save = self.rp['save_mov']
# TODO: add way to quit using exitkey
i = 0
print "\n"
while (self.s['mov'].status != visual.FINISHED):
if i == n_rep_first:
self.s['mov'].play()
self.s['mov'].seek((1/30.))
self.s['mov']._videoClock.reset(self.s['mov']._videoClock.getTime())
self.s['mov'].draw()
t = self.s['mov'].getCurrentFrameTime()
self.win.flip()
if save and i > n_rep_first and i%2 == 0:
self.save_frame(n_rep=1, buffer='front', name='prime_rest')
self.s['mov'].seek(t + (1/30.))
self.s['mov']._videoClock.reset(self.s['mov']._videoClock.getTime())
print "the frametime at flip", i, "was", t
#print "======="
i += 1
key = self.last_keypress()
print "new supersimple loop flipped", i, "times"
return
n_neut = 1
for i in xrange(n_rep_first):
self.s['mov'].draw()
if i == 0:
beg = self.trial_clock.getTime()
if (i+3)%2==0 and show_ft:
ft = self.s['mov'].getCurrentFrameTime() + (1/30.)
ft_ms = ft * 1000
frame_no = round(ft * 30)
text = "frametime: %6.2fms \nframe: %d\niteration/flip/frame refresh: %d\ntotal iteration: %d\n_nextFrameT=%6.2f, _videoClock=%6.2f" % (ft_ms, frame_no,i+1, n_neut, self.s['mov']._nextFrameT, self.s['mov']._videoClock.getTime())
#ts = visual.TextStim(self.win, text=text, pos=(0,-5.5))
#ts.draw()
self.win.flip()
if (i+3)%2==0 and show_ft and False:
if 'escape' in event.waitKeys():
self.quit()
n_neut += 1
self.s['mov'].play()
self.s['mov']._videoClock.reset()
for i in xrange(2):
self.s['mov'].draw()
ft = self.s['mov'].getCurrentFrameTime() + (1/30.)
ft_ms = ft * 1000
frame_no = round(ft * 30)
text = "frametime: %6.2fms \nframe: %d\niteration/flip/frame refresh: %d\ntotal iteration: %d\n_nextFrameT=%6.2f, _videoClock=%6.2f" % (ft_ms, frame_no,i+1, n_neut, self.s['mov']._nextFrameT, self.s['mov']._videoClock.getTime())
#ts = visual.TextStim(self.win, text=text, pos=(0,-5.5))
#ts.draw()
self.win.flip()
if show_ft and False:
if 'escape' in event.waitKeys():
self.quit()
n_neut += 1
end_neut = self.trial_clock.getTime()
print "neutral phase (21 refreshes, 10 frames) took", end_neut-beg, "s"
a = self.s['mov'].getCurrentFrameTime()
print "frametime before entering movement-loop is", a
self.s['mov'].seek(0.0)
i = 0
j = 0
#Tracer()()
#self.s['mov']._videoClock.reset()
while (self.s['mov'].status != visual.FINISHED):
self.s['mov'].draw()
if (i+3)%1==0 and show_ft:
ft = self.s['mov'].getCurrentFrameTime() + (1/30.)
ft_ms = ft * 1000
frame_no = round(ft * 30)
text = "frametime: %6.2fms \nframe: %d\niteration/flip/frame refresh: %d\ntotal iteration: %d\n_nextFrameT=%6.2f, _videoClock=%6.2f" % (ft_ms, frame_no,i+1, n_neut, self.s['mov']._nextFrameT, self.s['mov']._videoClock.getTime())
#ts = visual.TextStim(self.win, text=text, pos=(0,-5.5))
#ts.draw()
self.win.flip()
if (i+3)%1==0 and show_ft:
if 'escape' in event.waitKeys():
self.quit()
#self.win.flip()
if save and (i+2)%2==0:
t_pre = self.s['mov']._videoClock.getTime()
self.save_frame(n_rep=1, buffer='front', name='prime_rest')
self.s['mov'].seek((1/30. * (i/2) + (1/30.)))
self.s['mov']._videoClock.reset(self.s['mov']._videoClock.getTime())
t_post = self.s['mov']._videoClock.getTime()
print "in iteration", i
print "video Clock after seek:", t_post
print "frametime after seek", self.s['mov'].getCurrentFrameTime()
print "netframet after seek", self.s['mov']._nextFrameT
print "============="
# print round((t_post - t_pre)*1000, 3)
#print "nextFrameT for i=", i, "should be", round(self.s['mov']._nextFrameT * 1000, 3)
#print "time after reset:", self.s['mov']._videoClock.getTime()
#Tracer()()
if ut and i > 4:
self.s['mov'].stop()
return
i += 1
n_neut += 1
print " last run of loop went for", i, "iterations (was: 20, should be: 40!)"
print " total flips: ", n_neut
# finished, should I replay the final frame a couple of times to get to 1s?
end = self.trial_clock.getTime()
print ", duration of movie", end-beg
self.s['mov'].stop()
#
def create_trial(self):
"""Define trial composition
"""
self.trial = [exp.Event(self,
name='fix',
dur=3, # in seconds
display=self.s['fix'],
func=self.idle_event),
#exp.Event(self,
#dur=1,
#name='prime',
#display=self.s['mov'], # just get the 'stimf' from the trialhandler/exp_plan/conditions?
#func=self.play_simple),
exp.Event(self,
name='isi',
dur=3,
display=self.s['blank'],
func=self.idle_event),
exp.Event(self,
name='target',
dur=1.300,
display=self.s['target'],
#draw_stim=False,
func=self.idle_event),
exp.Event(self,
name='iti',
dur=1.5,
display=self.s['blank'],
func=self.idle_event)]
def create_exp_plan(self):
"""Put together trials
DURATION??
check image files here; randomize here
"""
self.make_trials()
trials = self.trial_list
exp_plan = []
i = 1
for trl in trials:
exp_plan.append(OrderedDict([
('block', 'emo_fac'),
('trialno', i),
('texpr', trl['texpr']), # is this how that should be done? (trl could be generated by make_trials above)
('validity', trl['validity']),
('pexpr', trl['pexpr']),
('stimf', trl['stimf']),
('mov', trl['mov']),
('sex', trl['sex']),
('id', trl['id']),
('onset', ''), # empty ones will be filled up
('dur', ''), # during runtime
('corr_resp', ''),
('subj_resp', ''),
('accuracy', ''),
('rt', ''),
]))
i += 1
self.exp_plan = exp_plan
def before_trial(self):
"""Set up stimuli prior to a trial
"""
if self.rp['dict']:
self.s['mov'] = self.this_trial['mov']
else:
vid_fname = os.path.join(self.paths["vid"], self.this_trial['stimf'])
self.s['mov'].setMovie(vid_fname)
self.s['target'].setImage(os.path.join(self.paths['img'], self.this_trial['validity'] + '_grey.png'))
thismu = MyMu(rp={'no_output':True, 'debug':True, 'dict': False, 'autorun': 0, 'log_level': logging.WARNING, 'unittest': False, 'save_mov':False})
#del MyMu # should take care of opened files?
# -- ==mymu_class== --
# -- ==mymu_run_thismu== --
# %%
thismu.run()
# -- ==mymu_run_thismu== --
|
mit
|
darioizzo/pykep
|
pykep/trajopt/_direct.py
|
2
|
16870
|
import pykep as pk
import numpy as np
class _direct_base(object):
"""Base class for direct trajectory optimisation problems with one only leg.
All inheriting classes will adopt, ``plot_traj``, ``plot_control``, and ``get_traj``.
"""
def __init__(self, mass=1000., thrust=0.3, isp=3000., nseg=10, mu=pk.MU_SUN, hf=False):
# segments
if isinstance(nseg, int):
self.nseg = nseg
else:
raise TypeError("nseg must be supplied as int.")
# spacecraft
self.sc = pk.sims_flanagan.spacecraft(mass, thrust, isp)
# leg
self.leg = pk.sims_flanagan.leg()
self.leg.set_spacecraft(self.sc)
self.leg.set_mu(mu)
self.leg.high_fidelity = hf
def fitness(self, z):
"""This function will be redefined in the inheriting classes
"""
pass
def _plot_traj(self, z, axis, units):
"""This function will be redefined in the inheriting classes
"""
pass
@staticmethod
def _get_controls(z):
"""This function will be redefined in the inheriting classes
"""
pass
def _pretty(self, z):
"""This function will be redefined in the inheriting classes
"""
pass
def get_nobj(self):
return 1
def get_nec(self):
return 7
def get_nic(self):
return self.nseg
def plot_traj(self, z, units=pk.AU, N=20, axes=None):
"""This function plots the 3 dimensional spacecraft trajectory, given a solution chromosome.
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
- units (``float``, ``int``): units by which to scale the trajectory dimensions.
- N (``int``): Number of points to be plotted along one arc.
"""
import matplotlib.pyplot as plt
# a call to the fitness on the chromosome z will change the class data member leg and set it
# to represent the data in the chromosome z
self.fitness(z)
# creates a figure if needed
if axes is None:
fig = plt.figure()
axes = fig.gca(projection='3d')
# plots a small Sun
axes.scatter([0], [0], [0], color='y')
# plots the leg
pk.orbit_plots.plot_sf_leg(self.leg, units=units, N=N, axes=axes)
# plots problem specifics
self._plot_traj(z, axes, units)
return axes
def plot_control(self, z, mark="k.-", time=True, axes=None):
"""Plots the control profile of the trajectory, as a function of time.
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
- mark (``string``): matplotlib marker.
- time (``bool``): If ``True``, x-axis is time in ``mjd2000``. If ``False``, x-axis is node index.
"""
import matplotlib.pyplot as plt
# data
traj = self.get_traj(z)
# time
t = traj[:, 0] - traj[0, 0]
# throttle
u = traj[:, 8]
# figure
if axes is None:
plt.figure()
axes = plt.gca()
# with time
if time:
axes.plot(t, u, mark)
plt.xlabel("Time [days]")
# without time
elif not time:
axes.plot(u, mark)
plt.xlabel("Segment number")
# should not happen
else:
raise RuntimeError("Something is wrong!")
# label
plt.ylabel("Throttle [ND]")
return axes
def get_traj(self, z):
"""Retrieves the trajectory information.
The returned np.array contains::
traj = [[t0, x0, y0, z0, vx0, vy0, vz0, m0, u0, ux0, uy0, uz0]
...
[tf, xf, yf, zf, vxf, vyf, vzf, mf, uf, uxf, uyf, uzf]]
Args:
- z (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
Returns:
np.array full information on states and controls along the trajectory nodes
"""
# set leg
self.fitness(z)
# get states
x = list(self.leg.get_states())
# clean states
for i in range(len(x)):
# remove matchpoint duplicate
x[i].pop(self.nseg)
# convert to numpy.ndarray
x[i] = np.asarray(x[i], np.float64)
# time and mass
if i in [0, 3]:
x[i].reshape((self.nseg * 2 + 1, 1))
# position and velocity
elif i in [1, 2]:
x[i].reshape((self.nseg * 2 + 1, 3))
else:
raise RuntimeError("Something is wrong!")
# unpack states
t, r, v, m = x
# control
u = self._get_controls(z)
# since controls are only defined at midpoints we need to add the
# values at the non midpoint nodes
tmp = [0] * len(u) * 2
for i in range(self.nseg):
tmp[i * 6] = u[i * 3]
tmp[i * 6 + 1] = u[i * 3 + 1]
tmp[i * 6 + 2] = u[i * 3 + 2]
tmp[i * 6 + 3] = u[i * 3]
tmp[i * 6 + 4] = u[i * 3 + 1]
tmp[i * 6 + 5] = u[i * 3 + 2]
tmp.append(u[-3])
tmp.append(u[-2])
tmp.append(u[-1])
u = np.asarray(tmp, np.float64).reshape((self.nseg * 2 + 1, 3))
# throttle
umag = np.linalg.norm(u, axis=1).reshape((self.nseg * 2 + 1, 1))
# full dataset [t, x, y, z, vx, vy, vz, m, u, ux, uy, uz]
return np.hstack((t.reshape((self.nseg * 2 + 1, 1)), r, v, m.reshape((self.nseg * 2 + 1, 1)), umag, u))
def pretty(self, z):
"""
pretty(x)
Args:
- x (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).
Prints human readable information on the trajectory represented by the decision vector x
"""
data = self.get_traj(z)
self._pretty(z)
print("\nSpacecraft Initial Position (m) : [{!r}, {!r}, {!r}]".format(
data[0, 1], data[0, 2], data[0, 3]))
print("Spacecraft Initial Velocity (m/s): [{!r}, {!r}, {!r}]".format(
data[0, 4], data[0, 5], data[0, 6]))
print("Spacecraft Initial Mass (kg) : {!r}".format(data[0, 7]))
print("Spacecraft Final Position (m) : [{!r}, {!r}, {!r}]".format(
data[-1, 1], data[-1, 2], data[-1, 3]))
print("Spacecraft Final Velocity (m/s): [{!r}, {!r}, {!r}]".format(
data[-1, 4], data[-1, 5], data[-1, 6]))
print("Spacecraft Final Mass (kg) : {!r}".format(data[-1, 7]))
class direct_pl2pl(_direct_base):
"""Represents a direct transcription transfer between solar system planets.
This problem works by manipulating the starting epoch t0, the transfer time T, the final mass mf and the controls.
The decision vector is::
z = [t0, T, mf, Vxi, Vyi, Vzi, Vxf, Vyf, Vzf, controls]
"""
def __init__(self,
p0="earth",
pf="mars",
mass=1000,
thrust=0.3,
isp=3000,
nseg=20,
t0=[500, 1000],
tof=[200, 500],
vinf_dep=1e-3,
vinf_arr=1e-3,
hf=False):
"""Initialises a direct transcription orbit to orbit problem.
Args:
- p0 (``str``): Departure planet name. (will be used to construct a planet.jpl_lp object)
- pf (``str``): Arrival planet name. (will be used to construct a planet.jpl_lp object)
- mass (``float``, ``int``): Spacecraft wet mass [kg].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- nseg (``int``): Number of collocation nodes.
- t0 (``list``): Launch epochs bounds [mjd2000].
- tof (``list``): Transfer time bounds [days].
- vinf_dep (``float``): allowed launch DV [km/s]
- vinf_arr (``float``): allowed arrival DV [km/s]
- hf (``bool``): High-fidelity. Activates a continuous representation for the thrust.
"""
# initialise base
_direct_base.__init__(self, mass, thrust, isp, nseg, pk.MU_SUN, hf)
# planets
if all([isinstance(pl, str) for pl in [p0, pf]]):
self.p0 = pk.planet.jpl_lp(p0)
self.pf = pk.planet.jpl_lp(pf)
else:
raise TypeError("Planet names must be supplied as str.")
# bounds TODO check
self.t0 = t0
self.tof = tof
# boundary conditions on velocity
self.vinf_dep = vinf_dep * 1000  # (in m/s)
self.vinf_arr = vinf_arr * 1000  # (in m/s)
# The class is built around solar system planets, hence mu is always the
# SUN
self.mu = pk.MU_SUN
def fitness(self, z):
# epochs (mjd2000)
t0 = pk.epoch(z[0])
tf = pk.epoch(z[0] + z[1])
# final mass
mf = z[2]
# controls
u = z[9:]
# compute Cartesian states of planets
r0, v0 = self.p0.eph(t0)
rf, vf = self.pf.eph(tf)
# add the vinfs from the chromosome
v0 = [a + b for a, b in zip(v0, z[3:6])]
vf = [a + b for a, b in zip(vf, z[6:9])]
# spacecraft states
x0 = pk.sims_flanagan.sc_state(r0, v0, self.sc.mass)
xf = pk.sims_flanagan.sc_state(rf, vf, mf)
# set leg
self.leg.set(t0, x0, u, tf, xf)
# compute equality constraints
ceq = np.asarray(self.leg.mismatch_constraints(), np.float64)
# nondimensionalise equality constraints
ceq[0:3] /= pk.AU
ceq[3:6] /= pk.EARTH_VELOCITY
ceq[6] /= self.sc.mass
# compute inequality constraints
cineq = np.asarray(self.leg.throttles_constraints(), np.float64)
# compute inequality constraints on departure and arrival velocities
v_dep_con = (z[3] ** 2 + z[4] ** 2 + z[5] ** 2 - self.vinf_dep ** 2)
v_arr_con = (z[6] ** 2 + z[7] ** 2 + z[8] ** 2 - self.vinf_arr ** 2)
# nondimensionalize inequality constraints
v_dep_con /= pk.EARTH_VELOCITY ** 2
v_arr_con /= pk.EARTH_VELOCITY ** 2
return np.hstack(([-mf], ceq, cineq, [v_dep_con, v_arr_con]))
def get_nic(self):
return super().get_nic() + 2
def get_bounds(self):
lb = [self.t0[0], self.tof[0], self.sc.mass * 0.1] + \
[-self.vinf_dep] * 3 + [-self.vinf_arr] * 3 + \
[-1, -1, -1] * self.nseg
ub = [self.t0[1], self.tof[1], self.sc.mass] + \
[self.vinf_dep] * 3 + [self.vinf_arr] * 3 + \
[1, 1, 1] * self.nseg
return (lb, ub)
def _plot_traj(self, z, axis, units):
# times
t0 = pk.epoch(z[0])
tf = pk.epoch(z[0] + z[1])
# plot Keplerian
pk.orbit_plots.plot_planet(
self.p0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axis)
pk.orbit_plots.plot_planet(
self.pf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axis)
def _pretty(self, z):
print("\nLow-thrust NEP transfer from " +
self.p0.name + " to " + self.pf.name)
print("\nLaunch epoch: {!r} MJD2000, a.k.a. {!r}".format(
z[0], pk.epoch(z[0])))
print("Arrival epoch: {!r} MJD2000, a.k.a. {!r}".format(
z[0] + z[1], pk.epoch(z[0] + z[1])))
print("Time of flight (days): {!r} ".format(z[1]))
print("\nLaunch DV (km/s) {!r} - [{!r},{!r},{!r}]".format(np.sqrt(
z[3]**2 + z[4]**2 + z[5]**2) / 1000, z[3] / 1000, z[4] / 1000, z[5] / 1000))
print("Arrival DV (km/s) {!r} - [{!r},{!r},{!r}]".format(np.sqrt(
z[6]**2 + z[7]**2 + z[8]**2) / 1000, z[6] / 1000, z[7] / 1000, z[8] / 1000))
@staticmethod
def _get_controls(z):
return z[9:]
class direct_or2or(_direct_base):
"""Represents a direct transcription transfer between orbits.
This problem works by manipulating the time of flight ``T``, the final mass ``mf`` and the eccentric anomalies ``E0, Ef``.
::
z = [T, mf, E0, Ef, controls]
"""
def __init__(self, elem0, elemf, mass, thrust, isp, nseg, Tlb, Tub, E0lb, E0ub, Eflb, Efub, mu=pk.MU_SUN, hf=True):
"""Initialises a direct transcription orbit to orbit problem.
Args:
- elem0 (``list``, ``tuple``, ``numpy.ndarray``): Departure Keplerian elements. The eccentric anomaly will be manipulated.
- elemf (``list``, ``tuple``, ``numpy.ndarray``): Arrival Keplerian elements. The eccentric anomaly will be manipulated.
- mass (``float``, ``int``): Spacecraft wet mass [kg].
- thrust (``float``, ``int``): Spacecraft maximum thrust [N].
- isp (``float``, ``int``): Spacecraft specific impulse [s].
- nseg (``int``): Number of collocation nodes.
- Tlb (``float``, ``int``): Minimum time of flight [mjd2000].
- Tub (``float``, ``int``): Maximum time of flight [mjd2000].
- E0lb (``float``, ``int``): Minimum departure eccentric anomaly [rad].
- E0ub (``float``, ``int``): Maximum departure eccentric anomaly [rad].
- Eflb (``float``, ``int``): Minimum arrival eccentric anomaly [rad].
- Efub (``float``, ``int``): Maximum arrival eccentric anomaly [rad].
- mu (``float``): Gravitational parameter of primary body [m^3/s^2].
- hf (``bool``): ``True`` for continuous thrust, ``False`` for impulsive thrust.
"""
if all([(isinstance(elem, list) or isinstance(elem, tuple) or isinstance(elem, np.ndarray)) for elem in [elem0, elemf]]):
elem0 = np.asarray(elem0, np.float64)
elemf = np.asarray(elemf, np.float64)
else:
raise ValueError(
"Both elem0 and elemf must be supplied as instances of list, tuple, or numpy.ndarray.")
if all([elem.size == 6 for elem in [elem0, elemf]]):
self.elem0 = elem0
self.elemf = elemf
else:
raise TypeError("Both elem0 and elemf must be 6-dimensional.")
if not all([(isinstance(T, float) or isinstance(T, int)) for T in [Tlb, Tub]]):
raise TypeError(
"Both Tlb and Tub must be supplied as instances of either float or int.")
elif not Tlb < Tub:
raise ValueError("Tlb must be less than Tub.")
else:
self.Tlb = float(Tlb)
self.Tub = float(Tub)
# initialise base
_direct_base.__init__(self, mass, thrust, isp, nseg, mu, hf)
self.mu = mu
self.E0lb = E0lb
self.E0ub = E0ub
self.Eflb = Eflb
self.Efub = Efub
def fitness(self, z):
# epochs (mjd2000)
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
# final mass
mf = z[1]
# eccentric anomolies
E0 = z[2]
Ef = z[3]
# controls
u = z[4:]
# set Keplerian elements
self.elem0[5] = E0
self.elemf[5] = Ef
# compute Cartesian states
r0, v0 = pk.par2ic(self.elem0, self.mu)
rf, vf = pk.par2ic(self.elemf, self.mu)
# spacecraft states
x0 = pk.sims_flanagan.sc_state(r0, v0, self.sc.mass)
xf = pk.sims_flanagan.sc_state(rf, vf, mf)
# set leg
self.leg.set(t0, x0, u, tf, xf)
# compute equality constraints
ceq = np.asarray(self.leg.mismatch_constraints(), np.float64)
# nondimensionalise equality constraints
ceq[0:3] /= pk.AU
ceq[3:6] /= pk.EARTH_VELOCITY
ceq[6] /= self.sc.mass
# compute inequality constraints
cineq = np.asarray(self.leg.throttles_constraints(), np.float64)
return np.hstack(([-mf], ceq, cineq))
def get_bounds(self):
pi = 3.14159265359
lb = [self.Tlb, self.sc.mass / 10, self.E0lb,
self.Eflb, *(-1, -1, -1) * self.nseg]
ub = [self.Tub, self.sc.mass, self.E0ub,
self.Efub, *(1, 1, 1) * self.nseg]
return (lb, ub)
def _plot_traj(self, z, axis, units):
# times
t0 = pk.epoch(0)
tf = pk.epoch(z[0])
# Keplerian
kep0 = pk.planet.keplerian(t0, self.elem0)
kepf = pk.planet.keplerian(tf, self.elemf)
# plot Keplerian
pk.orbit_plots.plot_planet(
kep0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axis)
pk.orbit_plots.plot_planet(
kepf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axis)
@staticmethod
def _get_controls(z):
return z[4:]
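# --- Minimal usage sketch (illustrative addition, not part of the original
# module). It only exercises the classes defined above and assumes a pykep
# installation that provides pk.planet.jpl_lp and pk.sims_flanagan. ---
if __name__ == "__main__":
    udp = direct_pl2pl(p0="earth", pf="mars", mass=1000, thrust=0.3,
                       isp=3000, nseg=20, t0=[500, 1000], tof=[200, 500])
    lb, ub = udp.get_bounds()
    # evaluate the fitness at the mid-point of the box bounds
    z = [0.5 * (l + u) for l, u in zip(lb, ub)]
    f = udp.fitness(z)
    print("objective (-mf):", f[0])
    print("equality constraint violations:", f[1:8])
    udp.pretty(z)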
|
gpl-3.0
|
kjung/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
12
|
11809
|
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
if hasattr(pca, 'random_state'):
pca.random_state = rng
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=4)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA and RandomizedPCA report consistent explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
|
bsd-3-clause
|
alkyl1978/gnuradio
|
gr-analog/examples/fmtest.py
|
40
|
7941
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
# Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
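# The prototype low-pass filter cuts off at half the channel spacing with a
# 100 dB stop band; the polyphase channelizer distributes these taps across
# its M branches, so each branch uses roughly len(taps)/M of them (tpc below).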
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
# Create an FM demodulator, squelch block, and vector sink for each of the M output channels of the filter and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
adammenges/statsmodels
|
statsmodels/regression/_prediction.py
|
27
|
6035
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
def __init__(self, predicted_mean, var_pred_mean, var_resid,
df=None, dist=None, row_labels=None):
self.predicted_mean = predicted_mean
self.var_pred_mean = var_pred_mean
self.df = df
self.var_resid = var_resid
self.row_labels = row_labels
if dist is None or dist == 'norm':
self.dist = stats.norm
self.dist_args = ()
elif dist == 't':
self.dist = stats.t
self.dist_args = (self.df,)
else:
self.dist = dist
self.dist_args = ()
@property
def se_obs(self):
return np.sqrt(self.var_pred_mean + self.var_resid)
@property
def se_mean(self):
return np.sqrt(self.var_pred_mean)
def conf_int(self, obs=False, alpha=0.05):
"""
Returns the confidence interval of the prediction.
If `obs` is True the interval is for a new observation (it includes the
residual variance), otherwise it is for the predicted mean.
This is currently only available for t and z tests.
Parameters
----------
obs : bool, optional
If True, return the prediction interval for a new observation;
if False (default), return the confidence interval for the predicted mean.
alpha : float, optional
The significance level for the confidence interval.
i.e., the default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
se = self.se_obs if obs else self.se_mean
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.predicted_mean - q * se
upper = self.predicted_mean + q * se
return np.column_stack((lower, upper))
def summary_frame(self, what='all', alpha=0.05):
# TODO: finish and cleanup
import pandas as pd
from statsmodels.compat.collections import OrderedDict
ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha, obs=False)
to_include = OrderedDict()
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
to_include['mean_ci_lower'] = ci_mean[:, 0]
to_include['mean_ci_upper'] = ci_mean[:, 1]
to_include['obs_ci_lower'] = ci_obs[:, 0]
to_include['obs_ci_upper'] = ci_obs[:, 1]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, pred_kwds=None):
"""
compute prediction results
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
weights : array_like, optional
Weights interpreted as in WLS, used for the variance of the predicted
residual.
pred_kwds : dict, optional
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction_results : instance
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
"""
### prepare exog and row_labels, based on base Results.predict
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.design_info.builder,
exog)
if exog is not None:
if row_labels is None:
if hasattr(exog, 'index'):
row_labels = exog.index
else:
row_labels = None
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
else:
exog = self.model.exog
if weights is None:
weights = getattr(self.model, 'weights', None)
if row_labels is None:
row_labels = getattr(self.model.data, 'row_labels', None)
# need to handle other arrays, TODO: is delegating to model possible ?
if weights is not None:
weights = np.asarray(weights)
if (weights.size > 1 and
(weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
raise ValueError('weights has wrong shape')
### end
if pred_kwds is None:
pred_kwds = {}
predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
covb = self.cov_params()
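# var_pred_mean is the row-wise diagonal of exog @ covb @ exog.T, i.e. the
# variance of the fitted mean at each prediction point, computed without
# forming the full n x n covariance matrix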
var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
# TODO: check that we have correct scale, Refactor scale #???
var_resid = self.scale / weights # self.mse_resid / weights
# special case for now:
if self.cov_type == 'fixed scale':
var_resid = self.cov_kwds['scale'] / weights
dist = ['norm', 't'][self.use_t]
return PredictionResults(predicted_mean, var_pred_mean, var_resid,
df=self.df_resid, dist=dist,
row_labels=row_labels)
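# --- Minimal usage sketch (illustrative addition, not part of the original
# module). It assumes a statsmodels version where results classes expose this
# function as ``res.get_prediction`` (the case for OLS in recent releases). ---
if __name__ == "__main__":
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = np.linspace(0, 10, 50)
    X = sm.add_constant(x)
    y = 2.0 + 0.5 * x + rng.normal(size=50)
    res = sm.OLS(y, X).fit()
    pred = res.get_prediction(X[:5])
    # mean and observation confidence intervals for the first five rows
    print(pred.summary_frame(alpha=0.05))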
|
bsd-3-clause
|
MediffRobotics/DeepRobotics
|
DeepLearnMaterials/tutorials/tensorflowTUT/tf11_build_network/full_code.py
|
1
|
1955
|
# View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
# Make up some real data
x_data = np.linspace(-1, 1, 300, dtype=np.float32)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.5 + noise
##plt.scatter(x_data, y_data)
##plt.show()
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# add hidden layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# important step
sess = tf.Session()
# tf.initialize_all_variables() is no longer valid as of
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.global_variables_initializer())
for i in range(1000):
# training
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 50 == 0:
# to see the step improvement
print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
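# Visualize the fit after training (illustrative addition; it reuses the
# matplotlib import and the trained session from above).
prediction_value = sess.run(prediction, feed_dict={xs: x_data})
plt.scatter(x_data, y_data)
plt.plot(x_data, prediction_value, 'r-', lw=3)
plt.show()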
|
gpl-3.0
|
computational-class/computational-communication-2016
|
code/tba/DeepLearningMovies/KaggleWord2VecUtility.py
|
12
|
2103
|
#!/usr/bin/env python
import re
import nltk
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
class KaggleWord2VecUtility(object):
"""KaggleWord2VecUtility is a utility class for processing raw HTML text into segments for further learning"""
@staticmethod
def review_to_wordlist( review, remove_stopwords=False ):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove HTML
review_text = BeautifulSoup(review).get_text()
#
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]"," ", review_text)
#
# 3. Convert words to lower case and split them
words = review_text.lower().split()
#
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
#
# 5. Return a list of words
return(words)
# Define a function to split a review into parsed sentences
@staticmethod
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.decode('utf8').strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append( KaggleWord2VecUtility.review_to_wordlist( raw_sentence, \
remove_stopwords ))
#
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists)
return sentences
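# --- Minimal usage sketch (illustrative addition, not part of the original
# module). It follows the Python 2 idiom of this file and assumes the NLTK
# 'punkt' and 'stopwords' corpora have been downloaded (nltk.download()). ---
if __name__ == '__main__':
    import nltk.data
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    raw_review = "<html><p>This movie was great. I would watch it again!</p></html>"
    print(KaggleWord2VecUtility.review_to_wordlist(raw_review, remove_stopwords=True))
    print(KaggleWord2VecUtility.review_to_sentences(raw_review, tokenizer))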
|
mit
|
r-mart/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
49
|
13124
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test that passing a dense matrix with negative values raises an error
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# perplexity should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
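def test_lda_usage_sketch():
# Editor's sketch, not part of the upstream test suite: fit LDA on the
# synthetic block-diagonal corpus and check the basic output shapes. It keeps
# the deprecated `n_topics` keyword for consistency with the rest of this file.
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=0)
doc_topic = lda.fit_transform(X)
assert_true(lda.components_.shape[0] == n_topics)
assert_true(doc_topic.shape == (X.shape[0], n_topics))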
|
bsd-3-clause
|
srli/SoftwareSystems
|
lecture14/thinkplot.py
|
88
|
12565
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
return cls.color_iter
def PrePlot(num=None, rows=1, cols=1):
"""Takes hints about what's coming.
num: number of lines that will be plotted
"""
if num:
Brewer.InitializeIter(num)
# TODO: get sharey and sharex working. probably means switching
# to subplots instead of subplot.
# also, get rid of the gray background.
if rows > 1 or cols > 1:
pyplot.subplots(rows, cols, sharey=True)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.iteritems():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
color_iter = Brewer.GetIter()
if color_iter:
try:
options = Underride(options, color=color_iter.next())
except StopIteration:
print 'Warning: Brewer ran out of colors.'
Brewer.ClearIter()
options = Underride(options, linewidth=3, alpha=0.8)
pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, fs = hist.Render()
width = min(Diff(xs))
if hist.name:
options = Underride(options, label=hist.name)
options = Underride(options,
align='center',
linewidth=0,
width=width)
pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs.pop()
ps.pop()
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs.pop(0)
ps.pop(0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.name:
options = Underride(options, label=cdf.name)
Plot(xs, ps, **options)
return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
obj: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.iterkeys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
loc = options.get('loc', 0)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
# TODO: figure out how to show more than one plot
Config(**options)
pyplot.show()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print 'Writing', filename
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print color
if __name__ == '__main__':
main()
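# Editor's usage sketch (commented out so that importing this module has no
# side effects); it assumes matplotlib can write image files in the current
# working directory:
#
# import thinkplot
# xs = range(10)
# thinkplot.PrePlot(num=1)
# thinkplot.Plot(xs, [x * x for x in xs], label='x squared')
# thinkplot.Save(root='example_plot', formats=['png'], xlabel='x', ylabel='x squared')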
|
gpl-3.0
|
subodhchhabra/pandashells
|
pandashells/test/p_regress_tests.py
|
3
|
3640
|
#! /usr/bin/env python
import sys
import re
from mock import patch, MagicMock
from unittest import TestCase
import numpy as np
import pandas as pd
from pandashells.bin.p_regress import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_regress.sys.argv',
'p.regress -m y~x'.split())
@patch('pandashells.bin.p_regress.io_lib.df_to_output')
@patch('pandashells.bin.p_regress.io_lib.df_from_input')
def test_cli_stats(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101),
'y': range(1, 101),
})
df_from_input_mock.return_value = df_in
write_mock = MagicMock()
sys.stdout = MagicMock()
sys.stdout.write = write_mock
main()
sys.stdout = sys.__stdout__
out_str = write_mock.call_args_list[0][0][0].replace('\n', ' ')
rex = re.compile(r'.*x\s+1\.0+')
m = rex.match(out_str)
self.assertTrue(True if m else False)
@patch(
'pandashells.bin.p_regress.sys.argv',
'p.regress -m y~x --plot'.split())
@patch('pandashells.bin.p_regress.io_lib.df_from_input')
@patch('pandashells.bin.p_regress.get_module')
def test_cli_plots_osx(self, get_module_mock, df_from_input_mock):
backend_mock = MagicMock(lower=MagicMock(return_value='macosx'))
mpl_mock = MagicMock(get_backend=MagicMock(return_value=backend_mock))
show_mock = MagicMock()
plot_lib_mock = MagicMock(show=show_mock)
get_module_mock.side_effect = [
plot_lib_mock,
mpl_mock,
MagicMock(return_value=MagicMock()),
MagicMock(return_value=MagicMock()),
]
df_in = pd.DataFrame({
'x': range(1, 101),
'y': range(1, 101),
})
df_from_input_mock.return_value = df_in
sys.stdout = MagicMock()
main()
sys.stdout = sys.__stdout__
self.assertTrue(show_mock.called)
@patch(
'pandashells.bin.p_regress.sys.argv',
'p.regress -m y~x --plot'.split())
@patch('pandashells.bin.p_regress.io_lib.df_from_input')
@patch('pandashells.bin.p_regress.get_module')
def test_cli_plots_tkagg(self, get_module_mock, df_from_input_mock):
backend_mock = MagicMock()
backend_mock.lower = MagicMock(return_value='tkagg')
mpl_mock = MagicMock(get_backend=backend_mock)
show_mock = MagicMock()
plot_lib_mock = MagicMock(show=show_mock)
get_module_mock.side_effect = [
plot_lib_mock,
mpl_mock,
MagicMock(return_value=MagicMock()),
MagicMock(return_value=MagicMock()),
]
df_in = pd.DataFrame({
'x': range(1, 101),
'y': range(1, 101),
})
df_from_input_mock.return_value = df_in
sys.stdout = MagicMock()
main()
sys.stdout = sys.__stdout__
self.assertTrue(show_mock.called)
@patch(
'pandashells.bin.p_regress.sys.argv',
'p.regress -m y~x --fit'.split())
@patch('pandashells.bin.p_regress.io_lib.df_to_output')
@patch('pandashells.bin.p_regress.io_lib.df_from_input')
def test_cli_fit(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101),
'y': range(1, 101),
})
df_from_input_mock.return_value = df_in
main()
df_out = df_to_output_mock.call_args_list[0][0][1]
self.assertTrue(np.allclose(df_out.y, df_out.fit_))
self.assertTrue(np.allclose(df_out.y * 0, df_out.resid_))
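# Editor's note: the stdout-capture pattern used in test_cli_stats above can be
# reused for new CLI assertions; a minimal, self-contained sketch (hypothetical,
# not part of the original suite):
#
# write_mock = MagicMock()
# sys.stdout = MagicMock()
# sys.stdout.write = write_mock
# try:
#     print('x 1.000')  # stands in for calling main()
# finally:
#     sys.stdout = sys.__stdout__
# out_str = write_mock.call_args_list[0][0][0]
# assert re.match(r'.*x\s+1\.0+', out_str)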
|
bsd-2-clause
|
Windy-Ground/scikit-learn
|
examples/cluster/plot_cluster_iris.py
|
350
|
2593
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
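# Editor's sketch (commented out so the gallery output is unchanged): a quick
# way to inspect the k=3 fit is to count how many of the 150 iris samples land
# in each cluster, e.g.
#
# sizes = np.bincount(estimators['k_means_iris_3'].labels_)
# print(sizes)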
|
bsd-3-clause
|
LohithBlaze/scikit-learn
|
sklearn/__check_build/__init__.py
|
345
|
1671
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
|
bsd-3-clause
|
alexei-matveev/ase-local
|
ase/calculators/ase_qmmm_manyqm.py
|
4
|
60409
|
"""QM/MM interface with QM=FHI-aims, MM=gromacs
QM could be something else, but you need to read in qm-atom charges
from the qm program (in method 'get_qm_charges')
One can have many QM regions, each with a different calculator.
There can be only one MM calculator, which is calculating the whole
system.
Non-bonded interactions:
------------------------
Generally:
Within the same QM-QM:
by qm calculator
MM-MM:
by MM calculator
QM-MM:
by MM using MM vdw parameters and QM charges.
Different QM different QM:
by MM using QM and MM charges and MM-vdw parameters
The Hirschfeld charges (or other atomic charges)
on QM atoms are calculated by QM in a H terminated cluster in vacuum.
The charge of QM atom next to MM atom (edge-QM-atom)
and its H neighbors are set as in the classical force field.
The extra(missing) charge results from:
1) linkH atoms
2) The edge-QM atoms, and their qm-H neighbors,
have their original MM charges.
3) and the fact that the charge of the QM fraction
is not usually an integer when using the original MM charges.
It is added equally to all QM atoms
(not being linkH and not being edge-QM-atom or its H neighbor)
so that the total charge of the MM-fragment involving QM atoms
will be the same as in the original MM-description.
Vdw interactions are calculated by MM-gromacs for MM and MM-QM interactions.
The QM-QM vdw interactions could be done by FHI-aims if desired
(by modifying the input for the QM FHI-aims calculation accordingly).
Bonded interactions::
E=
E_qm(QM-H) ; qm energy of H terminated QM cluster(s)
+ E_mm(ALL ATOMS) ; mm energy of all atoms,
; except for terms in which all MM-interacting atoms are
; in the same QM region
Forces do not act on link atoms but they are positioned by scaling.
Forces on link atoms are given to their QM and MM neighbors by chain rule.
(see J. Chem. Theory Comput. 2011, 7, 761-777).
The optimal edge-qm-atom-linkH bond length is calculated
by QM in 'get_eq_qm_atom_link_h_distances'
or they are read from a file.
Questions & Comments [email protected]
I'm especially interested in cases when we need two or more
QM regions. For instance two redox centers in a protein,
cathode and anode of a fuel cell ... you name it!
Some things to improve:
1) Water topology issue (at the moment water cannot be in QM),
Its topology should be put into the main
topology file, not in a separate file.
2) point charges and periodicity (if desired) to the QM calculation
(now in vacuum)
3) Eichinger type of link atom treatment with fitted force constants for
linkH-QMedge (bond strecth)
linkH-QMedge-QMnextTOedge (angle terms)
4) file io using unformatted formats (.trr) instead of g96
This is not easily possible without loading extra stuff from
ftp://ftp.gromacs.org/pub/contrib/xd...e-1.1.1.tar.gz.
5) Utilize gromacs-python wrapper: (just found this today 31.12.2012...)
http://orbeckst.github.com/GromacsWrapper/index.html#
"""
import sys
import numpy as np
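# Editor's usage sketch (commented out, illustrative only). The calculator
# constructors below are placeholders; only the AseQmmmManyqm signature is
# taken from this file:
#
# from ase.calculators.aims import Aims
# from ase.calculators.gromacs import Gromacs
# qm1 = Aims(...)   # one QM calculator per QM region
# mm = Gromacs(...) # a single MM calculator for the whole system
# calc = AseQmmmManyqm(nqm_regions=1, qm_calculators=[qm1],
#                      mm_calculator=mm, link_info='byQM')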
def get_neighbor_list(system):
"""
Makes a neighbor list of a system (ase Atoms).
See
https://wiki.fysik.dtu.dk/ase/ase/calculators/calculators.html#module-calculators
"""
from ase.calculators.neighborlist import NeighborList
from ase.data import covalent_radii
import os
import pickle
NEIGHBOR_FILE = 'neighbor_list_for_ase_qmmm.txt'
if os.path.exists(NEIGHBOR_FILE):
print('Reading qm/mm neighbor list from file:')
print('neighbor_list_for_ase_qmmm.txt')
myfile = open(NEIGHBOR_FILE, 'r')
neighbor_list = pickle.load(myfile)
else:
cut = [covalent_radii[atom.number] for atom in system]
skin = [0.2 for atom in system]
neighbor_list = NeighborList(cut, skin, \
self_interaction=False, bothways=True)
neighbor_list.update(system)
file = open(NEIGHBOR_FILE, 'w')
pickle.dump(neighbor_list, file)
file.close()
return neighbor_list
def get_qm_atoms(indexfilename='index.ndx'):
"""
Read the indexes of all QM atoms (there may be many QM regions)
"""
infile = open(indexfilename,'r')
lines = infile.readlines()
infile.close()
qms = []
for iline, line in enumerate(lines):
if (('[ QM' in line) or ('[ qm' in line) or ('[ Qm' in line)) \
or (('[QM' in line) or ('[qm' in line) or ('[Qm' in line)):
qm = []
for checkline in lines[iline+1:]:
if ('[') in checkline:
break
else:
qm = qm + [int(float(s)-1.0) for s in \
checkline.split() if s.isdigit()]
qm = list(set(qm))
qms.append(qm)
return qms
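# Editor's note (illustrative): get_qm_atoms() expects gromacs-style index
# groups whose names start with 'QM'/'qm' and whose atom numbers are 1-based
# (they are converted to 0-based above), e.g. an index.ndx fragment like
#
# [ QM1 ]
# 1 2 3 4 5
# [ QM2 ]
# 10 11 12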
class LinkAtom:
"""
Class for information about a single link-atom
(it terminates a QM cluster)
qm_region_index and link_atom_index refer to the following indexing system:
[[QM0 link atoms indexes from 0],[QM1 link atoms indexes from 0],...]
So above the second link atom in second qm region would have
qm_region_index=1, link_atom_index=1
link_atom_index_in_qm tells which index in qm system the link atom has
for instance
qm_region_index=1, link_atom_index_in_qm=20
means that link atom is 21'st atom in the second qm system
"""
def __init__(self, atom, qm_region_index, link_atom_index):
""" set initial values to a link atom object """
self.atom = atom
self.qm_region_index = qm_region_index
self.link_atom_index = link_atom_index
self.link_atom_index_in_qm = None
self.qm_neighbor = None
self.mm_neighbor = None
self.qm2_neighbors = []
self.qm3_neighbors = []
self.mm2_neighbors = []
self.set_qm2_neighbors = set([])
self.set_qm3_neighbors = set([])
self.set_mm2_neighbors = set([])
self.force_constant = 0.0
self.equilibrium_distance_xh = 0.0
self.equilibrium_distance_xy = 0.0
def set_link_atom(self, atom):
""" set an ase-atom to be the link atom """
self.atom = atom
def set_link_atom_qm_region_index(self, qm_region_index):
""" set to which qm region the link atom belongs to """
self.qm_region_index = qm_region_index
def set_link_atom_index_in_qm(self, link_atom_index_in_qm):
""" set what is my link atom index in this qm region """
self.link_atom_index_in_qm = link_atom_index_in_qm
def set_link_atom_qm_neighbor(self, qm_neighbor):
""" set what index does my qm neighbor have"""
self.qm_neighbor = qm_neighbor
def set_link_atom_mm_neighbor(self, mm_neighbor):
""" set what index does my mm neighbor have"""
self.mm_neighbor = mm_neighbor
def set_link_atom_qm2_neighbors(self, qm2_neighbors):
""" set what index does my second qm neighbor have"""
self.qm2_neighbors = qm2_neighbors
def set_link_atom_qm3_neighbors(self, qm3_neighbors):
""" set what index does my third qm neighbor have"""
self.qm3_neighbors = qm3_neighbors
def set_link_atom_mm2_neighbors(self, mm2_neighbors):
""" set what index does my second mm neighbor have"""
self.mm2_neighbors = mm2_neighbors
def set_force_constant(self, force_constant):
""" set the force constant of bond edge-qm -- linkH (not used)"""
self.force_constant = force_constant
def set_equilibrium_distance_xh(self, equilibrium_distance_xh):
""" set the equilibrium edge-qm -- linkH distance """
self.equilibrium_distance_xh = equilibrium_distance_xh
def set_equilibrium_distance_xy(self, equilibrium_distance_xy):
"""set the equilibrium edge-qm --
edge-mm distance (by MM-force field)"""
self.equilibrium_distance_xy = equilibrium_distance_xy
def get_link_atom(self):
""" get an ase-atom to be the link atom """
return self.atom
def get_link_atom_qm_region_index(self):
""" get to which qm region the link atom belongs to """
return self.qm_region_index
def get_link_atom_index_in_qm(self):
""" get what is my link atom index in this qm region """
return self.link_atom_index_in_qm
def get_link_atom_qm_neighbor(self):
""" get what index does my qm neighbor have"""
return self.qm_neighbor
def get_link_atom_mm_neighbor(self):
""" get what index does my mm neighbor have"""
return self.mm_neighbor
def get_link_atom_qm2_neighbors(self):
""" get what index does my second qm neighbor have"""
return self.qm2_neighbors
def get_link_atom_qm3_neighbors(self):
""" get what index does my third qm neighbor have"""
return self.qm3_neighbors
def get_link_atom_mm2_neighbors(self):
""" get what index does my second mm neighbor have"""
return self.mm2_neighbors
def get_force_constant(self):
""" get the force constant of bond edge-qm -- linkH (not used)"""
return self.force_constant
def get_equilibrium_distance_xh(self):
""" get the equilibrium edge-qm -- linkH distance """
return self.equilibrium_distance_xh
def get_equilibrium_distance_xy(self):
"""get the equilibrium edge-qm --
edge-mm distance (by MM-force field)"""
return self.equilibrium_distance_xy
class AseQmmmManyqm:
""" This is a qm/mm interface with qm=FHI-aims, mm=gromacs.
We can have many QM regions, each with a different calculator.
There can be only one MM calculator, which is calculating the whole
system.
Numeration of atoms starts from 0. (in qms, mms)
In qm calculations link atom(s) come(s) last.
For any qm region, the optimal bond lengths for all edge_atom-link_atom
pairs are optimized by QM simultaneously at the beginning of
the run when the flag link_info='byQM' is used (by the method 'get_eq_qm_atom_link_h_distances').
"""
def __init__(self, nqm_regions, \
qm_calculators, mm_calculator, \
link_info='byQM'):
""" Set initial values to each qm and mm calculator.
Additionally set information for the qm/mm interface.
The information about qm and mm indexes is read from
a file 'index.ndx'
Which can be generated with a gromacs tool 'make_ndx'
http://www.gromacs.org/Documentation/Gromacs_Utilities/make_ndx
Parameters
==========
nqm_regions: int
how many qm regions
qm_calculators: list members of a Class defining a Calculator
ase-qm calculator for each qm region
mm_calculator: a member of a Class defining a Calculator
ase-mm calculator for mm (the whole system)
link_info: str
can be either
'byQM': the edge_qm_atom-link_h_atom distances are calculated by QM
'byFile':the edge_qm_atom-link_h_atom distances are read from a file
"""
from ase.io import read, write
import os, glob
# clean
files = glob.glob('test-*')
for file in files:
try:
os.remove(file)
except OSError:
pass
self.atoms = None
self.positions = None
self.neighbor_list = None
self.link_atoms = []
self.energy = None
self.e_delta_stretch = None
self.nqm_regions = nqm_regions
self.qm_calculators = qm_calculators
self.mm_calculator = mm_calculator
self.qmatom_types = []
self.mmatom_types = []
#set unique name for each qm region
# (the output file of each qm calculation)
for i in range(len(self.qm_calculators)):
self.qm_calculators[i].set(output_template = 'aims'+str(i))
self.link_systems = None
self.equilibrium_distances_xy = []
self.equilibrium_distances_xh = []
self.force_constants = []
# get the sets of qm atoms
self.qms = get_qm_atoms()
self.set_qms = set(sum(self.qms, []))
print('qmsystem(s), indexing from 0:')
print('')
for index_out in self.qms:
index_str = ''
for index in index_out:
index_str += str(index) + ' '
print ('%s' % index_str)
print('')
if ( len(self.qms) != nqm_regions):
print ('Number of set of QM atoms does not match with nqm_regions')
print ('self.qms %s' % str(self.qms))
print ('nqm_regions %s' % str(nqm_regions))
sys.exit()
if ( len(self.qms) != len(qm_calculators)):
print ('Number of set of QM atoms does not match with')
print ('the number of QM calculators')
sys.exit()
#read the actual structure to define link atoms and their neighbors
system_tmp = mm_calculator.atoms
self.positions = system_tmp.get_positions()
#get neighbor lists
self.neighbor_list = get_neighbor_list(system_tmp)
#get the mm-atoms next to link atoms for all qm regions
(self.mms_edge, self.qms_edge, self.set_mms_edge, self.set_qms_edge) = \
self.get_edge_qm_and_mm_atoms(self.qms, system_tmp)
#get the mm atoms being second neighbors to any qm atom
(self.second_mms, self.set_second_mms) = \
self.get_next_neighbors(self.mms_edge, self.set_qms)
#get the qm atoms being second neighbors to link atom
(self.second_qms, self.set_second_qms) = \
self.get_next_neighbors(self.qms_edge, \
self.set_mms_edge)
#get the qm atoms being neighbors to link atom (edge-qm atoms)
# and their neighbors which have only single neighbor
# (for example edge-QM(C)-H or edge-QM(C)=O; for charge exclusion)
self.constant_charge_qms = \
self.get_constant_charge_qms\
(self.set_qms_edge, self.set_second_qms)
#get the qm atoms being third neighbors to link atom
(self.third_qms, self.set_third_qms) = \
self.get_next_neighbors\
(self.second_qms, self.set_qms_edge)
print('self.qms %s' % self.qms)
print('QM edge, MM edge %s' \
% str(self.qms_edge)+' '+ str(self.mms_edge))
print('MM second N of Link %s' % str(self.second_mms))
print('QM second N of Link %s' % str(self.second_qms))
print('QM third N of Link %s' % str(self.third_qms))
if link_info == 'byFILE':
self.read_eq_distances_from_file()
else:
#get QM-MM bond lengths
self.get_eq_distances_xy(\
topfilename=mm_calculator.topology_filename,\
force_field= mm_calculator.force_field)
#get QM-linkH distances by QM for all link atoms
self.get_eq_qm_atom_link_h_distances(system_tmp)
# write current link-info data to file (it can be later used,
# so XH bondconstants are already calculated by QM
# Also one can manually change the XY bond lengths
self.write_eq_distances_to_file(\
self.qms_edge)
#get target charge of each qm-region
self.classical_target_charge_sums = \
self.get_classical_target_charge_sums\
(self.mm_calculator.topology_filename, self.qms)
#get a list of link H atoms
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
self.qmsystems = self.define_QM_clusters_in_vacuum(system_tmp)
for iqm, qm in enumerate(self.qmsystems):
write('test-qm-'+str(iqm)+'.xyz', qm)
#attach calculators to qm regions
for iqm, qm in enumerate(self.qmsystems):
self.qmsystems[iqm].set_calculator(self.qm_calculators[iqm])
#attach calculators to the mm region (the whole system)
self.mm_system = system_tmp
self.mm_system.set_calculator(self.mm_calculator)
#initialize total energy and forces of qm regions
#and the mm energy
self.qm_energies = []
self.qm_forces = []
self.qm_charges = []
self.sum_qm_charge = []
for iqm, qm in enumerate(self.qmsystems):
self.qm_energies.append(0.0)
self.qm_forces.append(None)
self.qm_charges.append(None)
self.sum_qm_charge.append(None)
self.mm_energy = None
#set initial zero forces
self.forces = np.zeros((len(self.positions), 3))
self.charges = np.zeros((len(self.positions), 1))
try:
os.remove(self.mm_calculator.topology_filename+'.orig')
except:
pass
print('%s' % str(self.mm_calculator.topology_filename))
os.system('cp ' + self.mm_calculator.topology_filename + ' ' +\
self.mm_calculator.topology_filename + '.orig')
#remove some classical bonded interaction in the topology file
# this need to be done only once, because the bond topology
# is unchanged during a QM/MM run
#(QM charges can be updated in the topology, however)
# the original topology is generated when calling Gromacs(
# in the main script setting up QM, MM and minimization
if (self.mm_calculator.name == 'Gromacs'):
self.kill_top_lines_containing_only_qm_atoms\
(self.mm_calculator.topology_filename, self.qms, \
self.mm_calculator.topology_filename)
else:
print('Only Gromacs MM-calculator implemented in ASE-QM/MM')
sys.exit()
#exclude qm-qm non-bonded interactions in MM-gromacs
self.add_exclusions()
#generate input file for gromacs run
self.mm_calculator.generate_gromacs_run_file()
######### end of Init #####################################
def get_forces(self, atoms):
"""get forces acting on all atoms except link atoms """
self.update(atoms)
return self.forces
def get_potential_energy(self, atoms):
""" get the total energy of the MM and QM system(s) """
self.update(atoms)
return self.energy
def update(self, atoms):
"""Updates and does a check to see if a calculation is required"""
if self.calculation_required(atoms):
# performs an update of the atoms and qm systems
self.atoms = atoms.copy()
self.positions = atoms.get_positions()
self.mm_system = atoms.copy()
#get the positions of link H atoms
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
#get QM systems
self.qmsystems = self.define_QM_clusters_in_vacuum(\
self.atoms)
self.calculate(atoms)
def calculation_required(self, atoms):
"""Checks if a calculation is required"""
if ((self.positions is None) or
(self.atoms != atoms) or
(self.energy is None)):
return True
return False
def calculate_mm(self):
""" Calculating mm energies and forces """
import os
mm = self.atoms
mm.set_calculator(self.mm_calculator)
if (self.mm_calculator.name == 'Gromacs'):
try:
os.remove(self.mm_calculator.base_filename+'.log')
except:
pass
self.mm_calculator.update(mm)
self.mm_energy = 0
self.mm_energy += mm.get_potential_energy()
self.forces += mm.get_forces()
def calculate_qms(self):
""" QM calculations on all qm systems are carried out """
for iqm, qm in enumerate(self.qmsystems):
qm.set_calculator(self.qm_calculators[iqm])
self.qm_energies[iqm] = qm.get_potential_energy()
self.qm_forces[iqm] = np.zeros((len(qm), 3))
self.qm_forces[iqm] = qm.get_forces()
(self.sum_qm_charge[iqm], self.qm_charges[iqm]) = \
self.get_qm_charges(iqm,
number_of_link_atoms =\
len(self.qms_edge[iqm]))
if (len(self.qms[iqm]) != len(self.qm_charges[iqm])):
print('Problem in reading charges')
print('len(self.qms[iqm]) %s' % str(len(self.qms[iqm])))
print('len(self.qm_charges[iqm]) %s' \
% str(len(self.qm_charges[iqm])))
print('Check the output of QM program')
print('iqm, qm %s' % str(iqm)+ ' '+ str(qm))
print('self.qm_charges[iqm] %s' % str(self.qm_charges[iqm]))
sys.exit()
def calculate_single_qm(self, myqm, mycalculator):
""" Calculate the qm energy of a single qm region
(for X-H bond length calculations)
"""
myqm.set_calculator(mycalculator)
return myqm.get_potential_energy()
def run(self, atoms):
"""Runs QMs and MM"""
self.forces = np.zeros((len(atoms), 3))
self.calculate_qms()
# update QM charges to MM topology file
self.set_qm_charges_to_mm_topology()
#generate gromacs run file (.tpr) base on new topology
self.mm_calculator.generate_gromacs_run_file()
self.calculate_mm()
def calculate(self, atoms):
"""gets all energies and forces (qm, mm, qm-mm and corrections)"""
self.run(atoms)
self.energy = sum(self.qm_energies)+self.mm_energy
#map the forces of QM systems to all atoms
#loop over qm regions
for qm, qm_force in zip(self.qms, self.qm_forces):
#loop over qm atoms in a qm region
#set forces to the all-atom set (the all atom set does not
# have link atoms)
for iqm_atom, qm_atom in enumerate(qm):
self.forces[qm_atom] = self.forces[qm_atom] + \
qm_force[iqm_atom]
self.get_link_atom_forces(action = 'QM')
def get_link_atoms(self, qm_links, mm_links, \
force_constants,\
equilibrium_distances_xh, equilibrium_distances_xy):
"""
QM atoms can be bonded to MM atoms. In this case one sets
an extra H atom (a link atom).
The positions of the all link H atoms in all qm regions are
set along QM-MM and bond with length defined by:
J. Chem. Theory Comput 2011, 7, 761-777, Eq 1
r_XH = r_XY_current*(r_XH_from_qm_calculation /r_XY_from_forceField)
"""
import math
from ase import Atom
link_hs = []
for i_qm_region, (qm0, mm0) in enumerate (zip( qm_links, mm_links)):
for i_link_atom, (qmatom, mmatom) in enumerate (zip(qm0, mm0)):
dx = (self.positions[mmatom, 0] - self.positions[qmatom, 0])
dy = (self.positions[mmatom, 1] - self.positions[qmatom, 1])
dz = (self.positions[mmatom, 2] - self.positions[qmatom, 2])
d = math.sqrt(dx* dx+ dy* dy+ dz* dz)
unit_x = dx/ d
unit_y = dy/ d
unit_z = dz/ d
xh_bond_length = \
d*\
self.equilibrium_distances_xh[i_qm_region][i_link_atom]/\
self.equilibrium_distances_xy[i_qm_region][i_link_atom]
posh_x = self.positions[qmatom, 0] + unit_x* xh_bond_length
posh_y = self.positions[qmatom, 1] + unit_y* xh_bond_length
posh_z = self.positions[qmatom, 2] + unit_z* xh_bond_length
tmp_link_h = (Atom('H', position=(posh_x, posh_y, posh_z)))
link_h = LinkAtom(atom=tmp_link_h, \
qm_region_index = i_qm_region,\
link_atom_index = i_link_atom)
link_h.set_link_atom_qm_neighbor(qmatom)
link_h.set_link_atom_mm_neighbor(mmatom)
link_h.set_force_constant(\
force_constants[i_qm_region][i_link_atom])
link_h.set_equilibrium_distance_xh(equilibrium_distances_xh\
[i_qm_region][i_link_atom])
link_h.set_equilibrium_distance_xy(equilibrium_distances_xy\
[i_qm_region][i_link_atom])
link_hs.append(link_h)
return (link_hs)
def get_link_atom_forces(self, action):
""" Add forces due to link atom to QM atom
and to MM atom next to each link atom.
Top Curr Chem (2007) 268: 173-290
QM/MM Methods for Biological Systems
Hans Martin Senn and Walter Thiel
Eqs. 10(p192), 12(p193), 16a, 16b(p 194)
"""
for link_atom in self.link_atoms:
i_qm_atom = link_atom.qm_neighbor
i_mm_atom = link_atom.mm_neighbor
i_qm_region = link_atom.qm_region_index
link_atom_index_in_qm = link_atom.get_link_atom_index_in_qm()
if (action == 'QM'):
force_of_h = self.qm_forces[i_qm_region][link_atom_index_in_qm]
elif (action == 'MM'):
force_of_h = link_atom.mm_force
else:
print('not implemented in get_link_atom_forces')
sys.exit()
g = link_atom.equilibrium_distance_xh/\
link_atom.equilibrium_distance_xy
self.forces[i_mm_atom, 0] = self.forces[i_mm_atom, 0] +\
force_of_h[0] * g
self.forces[i_mm_atom, 1] = self.forces[i_mm_atom, 1] +\
force_of_h[1] * g
self.forces[i_mm_atom, 2] = self.forces[i_mm_atom, 2] +\
force_of_h[2] * g
self.forces[i_qm_atom, 0] = self.forces[i_qm_atom, 0] +\
force_of_h[0] * (1.0 - g)
self.forces[i_qm_atom, 1] = self.forces[i_qm_atom, 1] +\
force_of_h[1] * (1.0 - g)
self.forces[i_qm_atom, 2] = self.forces[i_qm_atom, 2] +\
force_of_h[2] * (1.0 - g)
def add_energy_exclusion_group(self, indexfilename='index.ndx'):
"""
Add energy exclusions for MM calculations.
This is the way to block non-bonded MM (coulomb&vdW)
interactions within a single QM region.
"""
infile = open(indexfilename,'r')
lines = infile.readlines()
infile.close()
qm_region_names = []
for line in lines:
if (('QM' in line) or ('Qm' in line) or ('qm' in line)):
qm_region_names.append(line.split()[1])
infile = open(self.mm_calculator.base_filename+'.mdp','r')
lines = infile.readlines()
infile.close()
outfile = open(self.mm_calculator.base_filename+'.mdp','w')
for line1 in lines:
outfile.write(line1)
outfile.write(';qm regions should not MM-interact with themselves \n')
outfile.write(';but separate qm regions MM-interact with each other \n')
outfile.write('energygrps = ')
for name in qm_region_names:
outfile.write(name + ' ')
outfile.write('\n')
outfile.write('energygrp_excl = ')
for name in qm_region_names:
outfile.write(name + ' ' + name + ' ')
outfile.write('\n')
outfile.close()
return
def add_exclusions(self):
"""
Add energy exclusions for MM calculations.
This is the way to block non-bonded MM (coulomb&vdW)
interactions within a single QM region.
"""
infile = open(self.mm_calculator.topology_filename,'r')
lines = infile.readlines()
infile.close()
outfile = open(self.mm_calculator.topology_filename,'w')
for line in lines:
if '[ angle' in line:
outfile.write('\n')
outfile.write('[ exclusions ] \n')
outfile.write(\
'; qm regions should not MM-interact with themselves \n')
outfile.write(\
'; but separate qm regions MM-interact with each other \n')
for qm_region in self.qms:
for qm_atom1 in qm_region:
outfile.write(str(qm_atom1 + 1) + ' ')
for qm_atom2 in qm_region:
if qm_atom1 != qm_atom2:
outfile.write(str(qm_atom2 + 1) + ' ')
outfile.write('\n')
outfile.write('\n')
outfile.write(line)
outfile.close()
return
def get_qm_charges(self, i_current_qm, calculator='Aims',
number_of_link_atoms = 0):
"""
Get partial charges on QM atoms.
The charges at link atoms are not returned.
"""
if calculator == 'Aims':
infile = open('aims'+str(i_current_qm)+'.out','r')
lines = infile.readlines()
infile.close()
qm_charges = []
for line in lines:
if ('Hirshfeld charge ' in line):
qm_charges.append(float(line.split()[4]))
sum_qm_charges = sum(qm_charges)
#delete charges of link atoms
if (number_of_link_atoms > 0):
del qm_charges[-number_of_link_atoms:]
return sum_qm_charges, qm_charges
def get_topology_lines(self, lines):
""" Get lines including charges of atoms (ok_lines)
also comments in these lines (comment_lines)
and lines before and after these lines
(lines_before and lines_after)
"""
lines_before = []
lines_change = []
lines_after = []
do_lines_before = True
do_lines_change = False
for line in lines:
if (' bonds ') in line:
do_lines_change = False
if do_lines_before:
lines_before.append(line)
elif do_lines_change:
lines_change.append(line)
else:
lines_after.append(line)
if (' atoms ') in line:
do_lines_before = False
do_lines_change = True
#kill comments and empty lines,
#get the charge in the topology file
comment_lines = []
lines_ok = []
for iline in range(len(lines_change)):
if lines_change[iline].startswith(';'):
comment_lines.append(lines_change[iline])
elif not lines_change[iline].strip():
pass
else:
try:
#new charge = float(lines_change[iline].split()[6])
#new charge_orig = charge_orig + charge
#top_charge.append(charge)
lines_ok.append(lines_change[iline])
except:
print('error in reading gromacs topology')
print('line is')
print('%s' % lines_change[iline])
sys.exit()
return lines_before, comment_lines, lines_ok, lines_after
def set_qm_charges_to_mm_topology(self):
""" Set qm charges to qm atoms of MM topology based on
a QM calculation.
1) The charges of link atoms are neglected.
2) The charge of a qm atom next to the link atom is set to be the
same value as in the original topology file. (trying to
avoid the artificial polarization due to qmAtom-linkH).
3) the total charge of the system (all QM and MM atoms) should be
the same as in the original classical system. Therefore, all the
QM atoms will gain/loose an equal amount of charge in the MM topology
file.
"""
infile = open(self.mm_calculator.topology_filename,'r')
lines = infile.readlines()
infile.close()
(lines_before, comment_lines, lines_ok, lines_after) = \
self.get_topology_lines(lines)
#check that the atom numering is ok
for iline in range(len(lines_ok)):
atom_nr = iline + 1
if int(lines_ok[iline].split()[0]) != atom_nr:
print('2: error in reading gromacs topology')
print('line is')
print('%s' % lines_ok[iline])
sys.exit()
# get the total charge of non-link H atoms in the current qm system
# The charges of edge atoms and their H neighbors
# are taken from topology
# (they are unchanged, it is not from QM calculations)
for iqm, qm in enumerate(self.qms):
charges = self.qm_charges[iqm]
charges_ok = charges
qm_charge_no_link_edge_mm = 0.0
n_qm_charge_atoms = 0
for qm_atom, charge in zip(qm, charges):
if qm_atom not in self.constant_charge_qms:
qm_charge_no_link_edge_mm = \
qm_charge_no_link_edge_mm + charge
n_qm_charge_atoms = n_qm_charge_atoms + 1
# correct the total charge to be equal the original one
# in the topology file by
# adding/ substracting missing/extra charge on
# non-edge and non-single neighbor next neib QM atoms
change_charge = \
( self.classical_target_charge_sums[iqm] - \
qm_charge_no_link_edge_mm)/\
float(n_qm_charge_atoms)
for iqmatom, qmatom in enumerate(qm):
if qmatom not in self.constant_charge_qms:
charges_ok[iqmatom] = charges[iqmatom] + change_charge
# set qm charges to the lines of gromacs topology file
for iqmatom, qmatom in enumerate(qm):
if qmatom not in self.constant_charge_qms:
lines_ok[qmatom] = \
lines_ok[qmatom][0:45]\
+str(round((charges_ok[iqmatom]),5)).rjust(11)+\
lines_ok[qmatom][56:70]
# write out the new topology file
sum_charge = 0.0
for iline in range(len(lines_ok)):
sum_charge = sum_charge + float(lines_ok[iline][46:56])
comment = '; qtot '+str(round(sum_charge,4))+'\n'.ljust(12)
outfile = open(self.mm_calculator.topology_filename, 'w')
for line in lines_before:
outfile.write(line)
for line in comment_lines:
outfile.write(line)
sum_charge = 0.0
for line in lines_ok:
sum_charge = sum_charge + float(line[46:56])
comment = '; qtot '+str(round(sum_charge,4)).ljust(11)+'\n'
outfile.write(line[0:70]+comment)
outfile.write('\n')
for line in lines_after:
outfile.write(line)
outfile.close()
#------------------------------------------------------------------
#------Below the stuff needed for initializing the QM/MM system ---
#------Setting up link atoms, defining QM and MM regions ----------
#------------------------------------------------------------------
def get_edge_qm_and_mm_atoms(self, qms, system):
""" Get neighbors of QM atoms (MM-link-atoms) that are not in QM
(there may be many QM regions)
edge-QM atom can NOT be neighbored by H atom(s)
also get edge-QM atoms
"""
masses = system.get_masses()
mms1 = []
qms1 = []
setmms1 = set([])
setqms1 = set([])
for qm in qms:
link_mm_atoms = []
link_qm_atoms = []
for qm_atom in qm:
indices, offsets = self.neighbor_list.get_neighbors(qm_atom)
for neib_atom in indices:
if neib_atom not in qm:
link_mm_atoms.append(neib_atom)
#take unique atoms of flattened list
link_mm_atoms = list(set(link_mm_atoms))
# Kill MM atoms that are H atoms in the neighborlist
oklink_mm_atoms = []
for index in link_mm_atoms:
if masses[index] > 1.5:
oklink_mm_atoms.append(index)
else:
print('WARNING:')
print('qm system cannot be bond to H atoms')
print('problem atom index is (numbering from 1): %s' \
% str(index+1))
print('if this is water H you should consider including it')
print('in QM')
#sys.exit()
            # get indexes of QM edge atoms;
            # one QM atom can be an edge atom more than once
            # (then this QM atom will have more than one link atom)
for link_mm_atom in oklink_mm_atoms:
indices, offsets = \
self.neighbor_list.get_neighbors(link_mm_atom)
for neib_atom in indices:
if neib_atom in qm:
link_qm_atoms.append(neib_atom)
mms1.append(oklink_mm_atoms)
qms1.append(link_qm_atoms)
setmms1 |= set(oklink_mm_atoms)
setqms1 |= set(link_qm_atoms)
return mms1, qms1, setmms1, setqms1
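    # A minimal sketch of the edge-atom search above for a single QM region,
    # assuming a plain {atom_index: list_of_neighbor_indices} mapping instead
    # of the ASE neighbor list; names and inputs are illustrative only.
    @staticmethod
    def _sketch_find_edge_atoms(qm_region, neighbors):
        """ Return (link_mm_atoms, edge_qm_atoms) for one QM region. """
        qm_set = set(qm_region)
        link_mm = sorted(set(n for a in qm_region
                             for n in neighbors[a] if n not in qm_set))
        # one entry per QM-MM bond, so a QM atom may appear more than once
        edge_qm = [a for m in link_mm for a in neighbors[m] if a in qm_set]
        return link_mm, edge_qm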
def get_next_neighbors(self, atom_indexes, prohibited_set):
""" Get neighbors of all atoms in 'atom_indexes'
that are not in 'prohibited_set'.
        'atom_indexes' is a list of lists in which the atom indexes
        belonging to each QM region form a separate list, that is
[[QM1 atom_indexes], [QM2 atom_indexes], ...]
"""
list_neibs = []
set_list_neibs = set([])
for current_atoms in atom_indexes:
neibs = []
set_current_atoms = set(current_atoms)
for current_atom in current_atoms:
indices, offsets = \
self.neighbor_list.get_neighbors(current_atom)
setneib = set(indices)
neibs += list(setneib - set_current_atoms-prohibited_set)
list_neibs.append(neibs)
set_list_neibs |= set(neibs)
return list_neibs, set_list_neibs
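    # A minimal sketch of the next-neighbor search above, again assuming a
    # plain {atom_index: list_of_neighbor_indices} mapping instead of the
    # ASE neighbor list; illustrative only.
    @staticmethod
    def _sketch_next_neighbors(atom_indexes, prohibited_set, neighbors):
        list_neibs = []
        for current_atoms in atom_indexes:
            current = set(current_atoms)
            neibs = set()
            for atom in current_atoms:
                neibs |= set(neighbors[atom]) - current - set(prohibited_set)
            list_neibs.append(sorted(neibs))
        return list_neibs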
def get_constant_charge_qms(self, set_qms_edge, set_second_qms):
""" get indices of all qm atoms whose charge in MM
calculations is taken from the original MM-topology
(not from the QM calculation). These atoms are edge QM atoms
and their neighbors in QM which have only one neighbor.
        This covers at least C(edge-QM)-H(second-QM) and C(edge-QM)=O(second-QM).
"""
set_charge_exclusion = set_qms_edge
for second_qms in set_second_qms:
indices, offsets = self.neighbor_list.get_neighbors(second_qms)
if len(indices)== 1:
set_charge_exclusion.add(second_qms)
return set_charge_exclusion
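    # A minimal sketch of the exclusion rule above with a plain
    # {atom_index: list_of_neighbor_indices} mapping: edge-QM atoms plus
    # those second QM atoms that have exactly one neighbor (e.g. the H in
    # C-H or the O in C=O) keep the charge from the MM topology;
    # illustrative only.
    @staticmethod
    def _sketch_constant_charge_atoms(edge_qms, second_qms, neighbors):
        keep = set(edge_qms)
        keep |= set(a for a in second_qms if len(neighbors[a]) == 1)
        return keep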
def get_eq_distances_xy(\
self, topfilename = 'gromos.top', force_field = 'oplsaa'):
"""
The link atom is positioned as in
J. Chem. Theory Comput 2011, 7, 761-777, Eq 1
For this purpose we need the equilibrium length of each
QM-MM covalent bond. Those are obtained here from the
files of the force field.
"""
import os
print('in get_eq_distances_xy, topfilename=')
        print('%s' % topfilename)
for qm in self.qms_edge:
equilibrium_distance_xy = []
for iqm in qm:
equilibrium_distance_xy.append(0.0)
self.equilibrium_distances_xy.append(equilibrium_distance_xy)
        # get the version of the topology file where one sees the bond
        # force constants (the file is written as <base_filename>.tpr.dump)
try:
os.remove(self.mm_calculator.base_filename+'.tpr.dump')
except OSError:
pass
os.system('gmxdump -s '+ self.mm_calculator.base_filename\
+'.tpr > ' + \
self.mm_calculator.base_filename+ \
'.tpr.dump 2>/dev/null')
if 'GMXDATA' in os.environ:
gromacs_home = os.environ['GMXDATA'].split(':')[0]
else:
gromacs_home = '/usr/local/gromacs/share/gromacs/'
#read the bonded force constants of this force field in order to
#get an estimate for X-Y bond constant
linesff = open(gromacs_home+ '/top/'+ force_field+ \
'.ff/ffbonded.itp', 'r').readlines()
oklinesff = []
start = False
for line in linesff:
if 'bondtypes' in line:
start = True
elif '[' in line:
break
if start and (line.strip()):
oklinesff.append(line)
#lines for getting oplsaa atom dual-types
if 'opls' in force_field:
lines_for_dual_types = open(gromacs_home+ '/top/'+ force_field+ \
'.ff/ffnonbonded.itp', 'r').readlines()
#read the types of interaction for bond stretching
lines_tpr = open(self.mm_calculator.base_filename+\
'.tpr.dump', 'r').readlines()
#read the topology file to get QM atom type
lines_top = open(topfilename, 'r').readlines()
oklines_top = []
start = False
for line in lines_top:
if start and ('[' in line):
break
if start:
                # keep only non-comment, non-blank atom lines
                if (not line.startswith(';')) and line.strip():
oklines_top.append(line)
if '[ atoms' in line:
start = True
#get force constant and bond eq distance for all QM-MM bonds
#
ok_equilibrium_distances_xy = []
ok_qmatom_types = []
ok_mmatom_types = []
for qm0, mm0, eqsxy in zip(
self.qms_edge, self.mms_edge, \
self.equilibrium_distances_xy):
ok_eqxy = []
ok_qmatom_type = []
ok_mmatom_type = []
for qmatom, mmatom, eqxy in \
zip(qm0, mm0, eqsxy):
#find qm-mm bond in topology file (indexes from 0)
# get the index for interaction
interaction = 'empty'
for line in lines_tpr:
if (' type' in line) and ('BONDS' in line):
if (qmatom == int(line.split()[3])) and \
(mmatom == int(line.split()[4])):
interaction = line.split()[1].lstrip('type=')
break
if (qmatom == int(line.split()[4])) and \
(mmatom == int(line.split()[3])):
interaction = line.split()[1].lstrip('type=')
break
if interaction == 'empty':
print('QM-MM bond not found in topology')
                        print('atoms are: QM %s, MM %s (1-based indexing)'
                              % (qmatom + 1, mmatom + 1))
sys.exit()
for line in lines_tpr:
if ('functype['+interaction+']=BONDS') in line:
r_xy0 = float(line.split()[2].rstrip(','))
#get type of the QM atom
qmatom_type = 'empty'
for line in oklines_top:
if (int(line.split()[0] ) == qmatom+ 1):
qmatom_type = line.split()[1]
#oplsaa atom type has a double name,
#the other one is used in file ffbonded.itp
break
if (qmatom_type == 'empty'):
print('problem in QM atom type')
sys.exit()
if 'opls' in force_field:
found = False
for line in lines_for_dual_types:
if (qmatom_type == line.split()[0]):
qmatom_type = line.split()[1]
found = True
break
if not found:
print('problem in QM atom type')
print('with OPLSAA force field dual atom types')
sys.exit()
#get type of the true link-MM atom
mmatom_type = 'empty'
for line in oklines_top:
if (int(line.split()[0] ) == mmatom+ 1):
mmatom_type = line.split()[1]
#oplsaa atom type has a double name,
#the other one is used in file ffbonded.itp
break
if (mmatom_type == 'empty'):
print('problem in MM atom type')
sys.exit()
if 'opls' in force_field:
found = False
for line in lines_for_dual_types:
if (mmatom_type == line.split()[0]):
mmatom_type = line.split()[1]
found = True
break
if not found:
print('problem in MM atom type')
print('with OPLSAA force field dual atom types')
sys.exit()
ok_qmatom_type.append(qmatom_type)
ok_mmatom_type.append(mmatom_type)
if (eqxy != 0.0):
                    # use the equilibrium distance given by the user
ok_eqxy.append(eqxy)
else:
ok_eqxy.append(r_xy0)
ok_equilibrium_distances_xy.append(ok_eqxy)
ok_qmatom_types.append(ok_qmatom_type)
ok_mmatom_types.append(ok_mmatom_type)
outfile = open('qm-mm-linkAtomsInfo.txt','w')
outfile.write(\
'=======================================================\n')
outfile.write('Information about QM-MM boundary(ies) \n')
outfile.write(\
'Created using the Atomic Simulation Environment (ASE) \n')
outfile.write(\
'=======================================================\n')
qmregion_count = 0
        # append information on each QM region to qm-mm-linkAtomsInfo.txt
for qm, mm, eqs_xy, eqs_xh, qmtypes, mmtypes in zip\
(self.qms_edge, self.mms_edge, ok_equilibrium_distances_xy,\
self.equilibrium_distances_xh,\
ok_qmatom_types, ok_mmatom_types):
outfile.write(\
'=======================================================\n')
qmregion_count = qmregion_count+ 1
outfile.write('Parameters related to QM region number '+\
str(qmregion_count)+'\n')
for qmatom, mmatom, eq_xy, eq_xh, qmtype, mmtype in zip\
(qm, mm, eqs_xy, eqs_xh,\
qmtypes, mmtypes):
outfile.write('qm-link-atom-index (from 1): '+str(qmatom)+'\n')
outfile.write('qm-link-atom-type: '+str(qmtype)+'\n')
outfile.write('mm-link-atom-index (from 1): '+str(mmatom)+'\n')
outfile.write('mm-link-atom-type: '+str(mmtype)+'\n')
outfile.write('qm-mm(notH)-equilibrium-distance: '\
+str(eq_xy)+' nm\n')
outfile.write('qm-H-equilibrium-distance(calculated by QM): '\
+str(eq_xh)+' nm\n')
outfile.close()
self.equilibrium_distances_xy = ok_equilibrium_distances_xy
self.qmatom_types = ok_qmatom_types
self.mmatom_types = ok_mmatom_types
return
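    # A minimal sketch of how the equilibrium bond length is pulled out of
    # one 'functype[...]=BONDS' line of the gmxdump output parsed above;
    # the example line in the docstring is abbreviated and made up, but it
    # follows the field layout the parser relies on.
    @staticmethod
    def _sketch_parse_bond_r0(dump_line):
        """ Return the equilibrium bond length (nm) from a BONDS line,
        e.g. 'functype[40]=BONDS, b0A= 1.09000e-01, cbA= 2.84512e+05' """
        return float(dump_line.split()[2].rstrip(','))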
def write_eq_distances_to_file(
self,
qm_links, filename='linkDATAout.txt'):
"""
Write classical bond equilibrium lengths
for XY (X in QM, Y in MM)
Write QM calculated XH(link atom) bond length (X in QM, H link atom)
"""
outfile = open(filename, 'w')
for iqm_region, qmlink in enumerate (qm_links):
for ilink, dummy in enumerate (qmlink):
data = self.equilibrium_distances_xy[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.equilibrium_distances_xh[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.force_constants[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.qmatom_types[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
data = self.mmatom_types[iqm_region][ilink]
outfile.write(str(data)+' ')
outfile.write('\n')
outfile.close()
return
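    # Layout of linkDATAout.txt / linkDATAin.txt as written above and read
    # back by read_eq_distances_from_file: five lines per link atom, in this
    # order (the values below are made up):
    #   0.1529        X-Y equilibrium distance [nm]
    #   0.1090        X-H equilibrium distance [nm]
    #   282001.6      X-H bond force constant
    #   CT            QM (X) atom type
    #   CT            MM (Y) atom type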
def read_eq_distances_from_file(self, filename='linkDATAin.txt'):
"""
Read classical bond equilibrium lengths
for XY (X in QM, Y in MM) or XH (X in QM, H link atom)
"""
myfile = open(filename, 'r')
self.equilibrium_distances_xy = []
self.equilibrium_distances_xh = []
self.force_constants = []
self.qmatom_types = []
self.mmatom_types = []
print('Reading X-H and other data from file: %s' % filename)
for qm in self.qms_edge:
equilibrium_distance_xy = []
equilibrium_distance_xh = []
force_constant = []
qmatom_type = []
mmatom_type = []
for iqm, dum in enumerate(qm):
line = myfile.readline()
equilibrium_distance_xy.append(float(line.split()[0]))
line = myfile.readline()
equilibrium_distance_xh.append(float(line.split()[0]))
line = myfile.readline()
force_constant.append(float(line.split()[0]))
line = myfile.readline()
qmatom_type.append(line.split()[0])
line = myfile.readline()
mmatom_type.append(line.split()[0])
self.equilibrium_distances_xy.append(equilibrium_distance_xy)
self.equilibrium_distances_xh.append(equilibrium_distance_xh)
self.force_constants.append(force_constant)
self.qmatom_types.append(qmatom_type)
self.mmatom_types.append(mmatom_type)
myfile.close()
return
def get_eq_qm_atom_link_h_distances(self, system_tmp):
""" get equilibrium QMatom-linkH distances
for all linkH:s
by QM """
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from scipy.optimize import fmin
def qm_bond_energy_function(x, system_tmp, i_qm_region):
""" get the qm energy of a single qm system with a given
edge-qm-atom---link-h-atom distances of that qm region
The qm region is i_qm_region, all
edge-qm-atom---link-h-atom distance in this qm_region are
optimized simultaneously
"""
BIG_VALUE = 100000000.0
for index_x, current_x in enumerate(x):
self.equilibrium_distances_xh\
[i_qm_region][index_x] = current_x
print('current X-H bond lengths [nm]')
print('%s' % str(x))
self.link_atoms = self.get_link_atoms(\
self.qms_edge, self.mms_edge,\
self.force_constants,\
self.equilibrium_distances_xh, \
self.equilibrium_distances_xy)
self.qmsystems = \
self.define_QM_clusters_in_vacuum(system_tmp)
#try:
single_qm_energy = self.calculate_single_qm(\
self.qmsystems[i_qm_region],\
self.qm_calculators[i_qm_region])
#except RuntimeError:
# single_qm_energy = BIG_VALUE
return single_qm_energy
print('=====================================================')
print('Calculating X-H bond lengths and bond force constants')
print('by QM in one shot for each QM region.')
print('In later calculations you can: ')
print('cp linkDATAout.txt linkDATAin.txt')
print("and set link_info = 'byFILE'")
print('=====================================================')
self.equilibrium_distances_xh = []
self.force_constants = []
for qm_edges in self.qms_edge:
force_constants = []
equilibrium_distances_xh = []
for qm_edge in qm_edges:
force_constants.append(0.0)
equilibrium_distances_xh.append(0.11)
self.force_constants.append(force_constants)
self.equilibrium_distances_xh.append(equilibrium_distances_xh)
#loop over qm regions. To get optimal simultaneous
# edgeQMatom-linkH distance(s) in [nm] in that qm region
for i_qm_region in range(len(self.qms_edge)):
print('NOW running : ')
print('QM region for optimising edge-linkH distances %s'\
% str(i_qm_region))
x = self.equilibrium_distances_xh[i_qm_region][:]
xopt = fmin(qm_bond_energy_function, \
x,\
args=(system_tmp, i_qm_region),\
xtol=0.0001, ftol=0.0001)
for index_xopt, current_xopt in enumerate(xopt):
self.equilibrium_distances_xh\
[i_qm_region][index_xopt] = current_xopt
print('i_qm_region, i_link_atom, optimal X-H bond[nm] %s' \
% (str(i_qm_region) + ' ' + str(index_xopt) \
+ ' ' + str(current_xopt)))
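    # A tiny, self-contained illustration of the scipy fmin call pattern
    # used above, minimizing a made-up quadratic around 0.11 nm instead of
    # a real QM energy; the starting value and tolerances are illustrative.
    @staticmethod
    def _sketch_fmin_usage():
        from scipy.optimize import fmin
        def quadratic_energy(x):
            return (x[0] - 0.11) ** 2
        return fmin(quadratic_energy, [0.10],
                    xtol=0.0001, ftol=0.0001, disp=0)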
def define_QM_clusters_in_vacuum(self, system):
""" Returns Each QM system as an Atoms object
We get a list of these Atoms objects
(in case we have many QM regions).
"""
from ase import Atoms
qmsystems = []
for qm0 in self.qms:
tmp_system = Atoms()
for qmatom in qm0:
tmp_system += system[qmatom]
qmsystems.append(tmp_system)
for link_atom in self.link_atoms:
tmp_atom = link_atom.get_link_atom()
qm_region = link_atom.get_link_atom_qm_region_index()
link_atom_index_in_qm = len(qmsystems[qm_region])
qmsystems[qm_region].append(tmp_atom)
link_atom.set_link_atom_index_in_qm(link_atom_index_in_qm)
return qmsystems
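    # Note: with a reasonably recent ASE, one QM cluster can also be picked
    # in a single step as system[list(qm0)]; the explicit loop above is kept
    # because link atoms are appended one by one afterwards.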
def kill_top_lines_containing_only_qm_atoms(self, \
intopfilename, \
qms, outtopfilename):
"""
Delete all lines in the topology file that contain only qm atoms
in bonded sections
(bonds, angles or dihedrals)
and in pairs section (1-4 interactions)
"""
# get an index of all qm atoms in all qm regions
qm = set()
for qm_tmp in qms:
qm = qm.union(set(qm_tmp))
infile = open(intopfilename,'r')
lines = infile.readlines()
infile.close()
oklines = []
accept = True
check = ''
for line in lines:
if (('[ bonds' in line)):
oklines.append(line)
accept = False
check = 'bond'
elif (('[ angles' in line)):
oklines.append(line)
accept = False
check = 'angle'
elif (('[ dihedrals' in line)):
oklines.append(line)
accept = False
check = 'dihedral'
elif (('[ pairs' in line)):
oklines.append(line)
accept = False
check = 'pair'
elif ('[' in line):
oklines.append(line)
accept = True
check = ''
elif line in ['\n']:
oklines.append(line)
accept = True
check = ''
elif accept:
oklines.append(line)
else:
indexes = [int(float(s)-1.0) \
for s in line.split() if s.isdigit()]
indexes1 = [int(s) for s in line.split() if s.isdigit()]
                if indexes == []:  # this keeps the comment line
                    # after the bonds, angles, dihedrals or pairs header
oklines.append(line)
elif check == 'bond':
bondedatoms = set(indexes[0:2])
                    # set an empty bond interaction for qm-qm bonds (type 5)
                    # (this way LJ and electrostatics are not messed up)
if (bondedatoms.issubset(qm)):
newline = str(indexes1[0]).rjust(8)+\
str(indexes1[1]).rjust(8)+\
('5').rjust(8) + '\n'
oklines.append(newline)
else:
oklines.append(line)
elif check == 'angle':
bondedatoms = set(indexes[0:3])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
elif check == 'dihedral':
bondedatoms = set(indexes[0:4])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
elif check == 'pair':
bondedatoms = set(indexes[0:2])
if (bondedatoms.issubset(qm)):
pass
else:
oklines.append(line)
outfile = open(outtopfilename,'w')
for line in oklines:
outfile.write(line)
outfile.close()
return
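    # Example of the bond rewrite above (atom numbers are made up): a
    # '[ bonds ]' line such as '   25   26     1' whose two atoms are both
    # in QM is replaced by '      25      26       5', i.e. a type-5
    # (connection-only) bond with no bonded interaction.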
def get_classical_target_charge_sums(self, intopfilename, qms):
""" get sum of MM charges of the charged changed by QM
these are qm atoms that are not link-atoms or edge-qm atoms
xxx this has a problem:
Water is in .itp files, not in topology...
"""
infile = open(intopfilename,'r')
lines = infile.readlines()
infile.close()
(lines_before, comment_lines, ok_lines, lines_after) = \
self.get_topology_lines(lines)
classical_target_charge_sums = []
for iqm, qm in enumerate(qms):
classical_target_charge_sum = 0.0
for line in ok_lines:
atom_index = int(line.split()[0])-1
if (atom_index in qm) and \
(not(atom_index in self.constant_charge_qms)):
classical_target_charge_sum = \
classical_target_charge_sum + \
float(line.split()[6])
classical_target_charge_sums.\
append(classical_target_charge_sum)
return classical_target_charge_sums
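    # Example of one '[ atoms ]' topology line in the layout assumed by the
    # parser above (the values are made up); the charge is the seventh
    # field, i.e. line.split()[6]:
    #     5  opls_140      1    ALA     HA      2     0.060     1.008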
|
gpl-2.0
|