repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
mugizico/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)  # use the fiducial D while varying k
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
lateral/hyperplane-hasher | test_classes/test_key_value_store.py | 2 | 21404 | from nn.hh_ensemble_lookup import *
from nn.dictionary_store import *
from ann.hyperplane_hasher import HyperplaneHasher, NORMAL_VECTOR_ID, NUM_NORMAL_VECS_ID, CHAMBER_ID
import re
import unittest, copy, string, numpy as np, random as rd, pandas as pd
from functools import reduce  # needed on Python 3 for the label-aggregation helpers below
RANK = 10
NAME = 'test_HHENL'
METRIC = 'l2'
NNV = 5
NHH = 4
NUM_VECS = 30
class TestHHEnsembleLookup(unittest.TestCase):
def setUp(self):
"""Create a HHEnsembleLookup object whose underlying KeyValueStore object
is a DictionaryStore instance populated by NUM_VECS feature vectors."""
self.letters = list(string.ascii_lowercase)
self.feature_vecs = HyperplaneHasher._random_vectors(NUM_VECS, RANK)
self.feature_vecs_ids = ['%i' % i for i in range(NUM_VECS)]
self.hhenl = self._create_hhenl()
for pair in zip(self.feature_vecs, self.feature_vecs_ids):
vec, vec_id = pair
self.hhenl.add_vector(vec, vec_id)
def _create_hhenl(self):
"""Returns an empty instance of HHEnsembleNNLookup."""
kvstore = DictionaryStore(dict())
return HHEnsembleNNLookup(rank=RANK, name=NAME, metric=METRIC, num_normal_vectors=NNV, num_hyperplane_hashers=NHH, kvstore=kvstore)
def _get_all_hh_labels(self, hh):
"""Returns the set of all labels in all chambers of hh."""
ch_ids = hh.get_chamber_ids()
list_set_labels = [hh.chamber_labels(ch_id) for ch_id in ch_ids]
return reduce(lambda x, y: x.union(y), list_set_labels)
def _rank_error(self, function):
"""Throws ValueError if len(vec) != self.rank."""
vec = self.feature_vecs[0]
vec_id = self.feature_vecs_ids[0]
self.assertRaises(ValueError, function, *[vec[1:], vec_id])
def _bulk_rank_error(self, function):
"""Throws ValueError if len(vec) != self.rank for any vec in vecs."""
vec_short = HyperplaneHasher._random_vectors(1, self.hhenl.rank - 1)
vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank) + vec_short
vec_ids = self.letters[:11]
self.hhenl._bulk_label_chamber_ensemble(vecs[:10], vec_ids[:10])
self.assertRaises(ValueError, function, *[vecs, vec_ids])
def _bulk_list_length_error(self, function):
"""Throws ValueError if len(vec_ids) != len(vec_ids)."""
vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank)
vec_ids = self.letters[:11]
self.assertRaises(ValueError, function, *[vecs, vec_ids])
def test_init_1(self):
"""Class attributes are correctly initialised."""
self.assertEqual(RANK, self.hhenl.rank)
self.assertEqual(METRIC, self.hhenl.metric)
self.assertEqual(NNV, self.hhenl.num_normal_vectors)
self.assertEqual(NHH, self.hhenl.num_hyperplane_hashers)
def test_init_2(self):
"""Attribute self.hhs is a list of HyperplaneHasher objects of
length = self.num_hyperplane_hashers. Each HH object has the expected
value for 'num_normal_vectors'."""
hhs = self.hhenl.hhs
self.assertIsInstance(hhs, list)
for hh in hhs:
self.assertIsInstance(hh, HyperplaneHasher)
self.assertEqual(hh.num_normal_vectors, NNV)
self.assertEqual(len(hhs), self.hhenl.num_hyperplane_hashers)
def test_init_3(self):
"""Total set of labels in all chambers of any given HyperplaneHasher object
equals set(self.feature_vecs_ids)."""
hhs = self.hhenl.hhs
for hh in hhs:
chamber_ids = set([hh.get_chamber_id(vec) for vec in self.feature_vecs])
labels_set_list = [hh.chamber_labels(ch_id) for ch_id in chamber_ids]
labels_set = reduce(lambda x, y: x.union(y), labels_set_list)
self.assertEqual(labels_set, set(self.feature_vecs_ids))
self.assertEqual(len(labels_set), NUM_VECS)
def test_label_chamber_ensemble_1(self):
"""For each underlying HyperplaneHasher object, a new label is
added to precisely one chamber. The set of chamber ids present as keys
in self.kvstore is either unchanged, or enlarged by one element."""
feature_vecs = self.feature_vecs
old_chamber_ids = {hh: set([hh.get_chamber_id(vec) for vec in feature_vecs]) for hh in self.hhenl.hhs}
old_chamber_labels = {hh: [hh.chamber_labels(ch_id) for ch_id in old_chamber_ids[hh]] for hh in self.hhenl.hhs}
new_vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
self.hhenl._label_chamber_ensemble(new_vec, 'new_vec_id')
feature_vecs.append(new_vec)
new_chamber_ids = {hh: set([hh.get_chamber_id(vec) for vec in feature_vecs]) for hh in self.hhenl.hhs}
new_chamber_labels = {hh: [hh.chamber_labels(ch_id) for ch_id in new_chamber_ids[hh]] for hh in self.hhenl.hhs}
for hh in self.hhenl.hhs:
len_diff = len(new_chamber_ids[hh]) - len(old_chamber_ids[hh])
self.assertIn(len_diff, [0, 1])
if len_diff == 0:
#vector 'new_vec' has landed in an existing chamber.
#the set of chamber ids thus remains unchanged, but
#exactly one chamber has exactly one new label,
#namely 'new_vec_id'
self.assertEqual(old_chamber_ids[hh], new_chamber_ids[hh])
comparison = list(np.array(old_chamber_labels[hh]) == np.array(new_chamber_labels[hh]))
expected_bools = set([False] + [True] * (len(old_chamber_ids) - 1))
self.assertEqual(set(comparison), expected_bools)
label_diff = new_chamber_labels[hh][comparison.index(False)].difference(old_chamber_labels[hh][comparison.index(False)])
self.assertEqual(label_diff, set(['new_vec_id']))
if len_diff == 1:
#vector 'new_vec' has landed in a new chamber.
#The id of the new chamber is that of the chamber to
#which 'new_vec' belongs, and the new chamber
#is exactly set(['new_vec_id']).
id_diff = new_chamber_ids[hh].difference(old_chamber_ids[hh])
self.assertEqual(id_diff, set([hh.get_chamber_id(new_vec)]))
labels_diff = [entry for entry in new_chamber_labels[hh] if entry not in old_chamber_labels[hh]][0]
self.assertEqual(labels_diff, set(['new_vec_id']))
def test_label_chamber_ensemble_2(self):
"""Throws ValueError if len(vec) != self.rank."""
new_vec_short = HyperplaneHasher._random_vectors(1, self.hhenl.rank - 1)[0]
self.assertRaises(ValueError, self.hhenl._label_chamber_ensemble, *[new_vec_short, 'new_vec_short_id'])
def test_bulk_label_chamber_ensemble_1(self):
"""Throws ValueError if len(vec) != self.rank for any vec in vecs."""
vec_short = HyperplaneHasher._random_vectors(1, self.hhenl.rank - 1)
vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank) + vec_short
vec_ids = self.letters[:11]
self.hhenl._bulk_label_chamber_ensemble(vecs[:10], vec_ids[:10])
self.assertRaises(ValueError, self.hhenl._bulk_label_chamber_ensemble, *[vecs, vec_ids])
def test_bulk_label_chamber_ensemble_2(self):
"""Throws ValueError if len(vec_ids) != len(vec_ids)."""
self._bulk_list_length_error(self.hhenl._bulk_label_chamber_ensemble)
def test_bulk_label_chamber_ensemble_3(self):
"""If vec_ids are all unknown, then for each hh in self.hhenl.hhs, the difference in the
union over all chamber_ids in hh.get_chamber_ids() of hh.chamber_labels(chamber_id), before
and after the bulk_label, is equal to vec_ids."""
vecs = HyperplaneHasher._random_vectors(10, self.hhenl.rank)
vec_ids = self.letters[:10]
labels_before = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
self.hhenl._bulk_label_chamber_ensemble(vecs, vec_ids)
labels_after = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
for b, a in zip(labels_before, labels_after):
self.assertEqual(a.difference(b), set(vec_ids))
def test_bulk_label_chamber_ensemble_4(self):
"""If vec_ids are partially known, then for each hh in self.hhenl.hhs, the difference in the
union over all chamber_ids in hh.get_chamber_ids() of hh.chamber_labels(chamber_id), before
and after the bulk_label, is equal to the unknown vec_ids."""
vecs = HyperplaneHasher._random_vectors(24, self.hhenl.rank)
old_vec_ids = self.feature_vecs_ids[:11]
new_vec_ids = self.letters[:13]
vec_ids = old_vec_ids + new_vec_ids
labels_before = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
self.hhenl._bulk_label_chamber_ensemble(vecs, vec_ids)
labels_after = [self._get_all_hh_labels(hh) for hh in self.hhenl.hhs]
for b, a in zip(labels_before, labels_after):
self.assertEqual(a.difference(b), set(new_vec_ids))
def test_bulk_label_chamber_ensemble_5(self):
"""Let first = [first_1, first_2, ..., first_n] and second = [second_1, second_2, ..., second_n] be
lists of labels, and vecs = [vec_1, vec_2, ..., vec_n] a list of vectors. Then after applying the method
first to (vecs, first), then to (vecs, second), all chambers C in all hh in self.hhenl.hhs have the property
that first_i in C iff second_i in C."""
vecs = HyperplaneHasher._random_vectors(20, self.hhenl.rank)
first_ex = re.compile(r'first_([\S]*)')
second_ex = re.compile(r'second_([\S]*)')
first = ['first_%i' % i for i in range(20)]
second = ['second_%i' % i for i in range(20)]
self.hhenl._bulk_label_chamber_ensemble(vecs, first)
self.hhenl._bulk_label_chamber_ensemble(vecs, second)
for hh in self.hhenl.hhs:
ch_ids = hh.get_chamber_ids()
for ch_id in ch_ids:
labels = hh.chamber_labels(ch_id)
flabels = [''.join(first_ex.findall(label)) for label in labels]
first_labels = set([entry for entry in flabels if len(entry) > 0])
slabels = [''.join(second_ex.findall(label)) for label in labels]
second_labels = set([entry for entry in slabels if len(entry) > 0])
self.assertEqual(first_labels, second_labels)
def test_get_nn_candidates_1(self):
"""Returned objects is a set of strings of length
at least num_neighbours."""
vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
nn = 10
result = self.hhenl._get_nn_candidates(vec, nn)
self.assertIsInstance(result, set)
for element in result:
self.assertIsInstance(element, str)
self.assertGreaterEqual(len(result), nn)
def test_get_nn_candidates_2(self):
"""Throws ValueError if len(vec_ids) != len(vec_ids)."""
self._rank_error(self.hhenl._get_nn_candidates)
def test_get_vector_ids_1(self):
"""Fetched vector ids are the expected ones."""
self.assertEqual(set(self.feature_vecs_ids), self.hhenl.get_vector_ids())
def test_get_vector_1(self):
"""The returned object is a numpy array of length self.rank."""
vec_id = self.feature_vecs_ids[0]
vec = self.hhenl.get_vector(vec_id)
self.assertIsInstance(vec, np.ndarray)
self.assertEqual(len(vec), self.hhenl.rank)
self.assertTrue((self.feature_vecs[0]==vec).all())
def test_get_vector_2(self):
"""Throws KeyError if 'vec_id' is unrecognised by underlying
KeyValueStore object."""
vec_id = 'non_existent_vec'
self.assertRaises(KeyError, self.hhenl.get_vector, vec_id)
def test_bulk_get_vector_1(self):
"""The returned object is a list of numpy arrays, each of length self.rank."""
def check_vec(vec):
self.assertIsInstance(vec, np.ndarray)
self.assertEqual(len(vec), self.hhenl.rank)
ids = self.feature_vecs_ids
vecs = self.hhenl.bulk_get_vector(ids)
self.assertIsInstance(vecs, list)
[check_vec(vec) for vec in vecs]
def test_bulk_get_vector_2(self):
"""Method returns a list of length equal to the number of recognised
vector ids."""
vec_ids = self.feature_vecs_ids
ids_1 = vec_ids + ['non_existent_vec_%i' % i for i in range(5)]
ids_2 = ['non_existent_vec_%i' % i for i in range(5)]
vecs_1 = self.hhenl.bulk_get_vector(ids_1)
vecs_2 = self.hhenl.bulk_get_vector(ids_2)
self.assertEqual(len(vecs_1), len(vec_ids))
self.assertEqual(len(vecs_2), 0)
def test_bulk_get_vector_3(self):
"""Copies of the stored vectors are returned, rather than the vectors themselves.
Thus changing any of the returned vectors does _not_ affect the stored versions."""
vec_ids = self.feature_vecs_ids
original = self.hhenl.bulk_get_vector(vec_ids)
first = self.hhenl.bulk_get_vector(vec_ids)
for vec in first:
vec[0] = 11.0
second = self.hhenl.bulk_get_vector(vec_ids)
for f, s, o in zip(first, second, original):
self.assertTrue((s == o).all())
self.assertTrue((f != o).any())
def test_get_rank_1(self):
"""Returns a positive integer agreeing with the length
of a known vector, and with the underlying 'rank' attribute."""
vec_id = self.feature_vecs_ids[0]
vec = self.hhenl.get_vector(vec_id)
returned_rank = self.hhenl.get_rank()
self.assertEqual(self.hhenl.rank, returned_rank)
self.assertEqual(len(vec), returned_rank)
def test_delete_vector_1(self):
"""Removes 'vec' both from self.hhenl.kvstore, and from all chambers
of all underlying HyperplaneHasher objects."""
vec = self.feature_vecs[0]
vec_id = self.feature_vecs_ids[0]
self.hhenl.delete_vector(vec, vec_id)
self.assertRaises(KeyError, self.hhenl.get_vector, vec_id)
all_vec_ids = self.hhenl.get_vector_ids()
self.assertNotIn(vec_id, all_vec_ids)
for hh in self.hhenl.hhs:
chamber_id = hh.get_chamber_id(vec)
self.assertNotIn(vec_id, hh.chamber_labels(chamber_id))
def test_delete_vector_2(self):
"""Throws KeyError if 'vec_id' is not a key in the underlying KeyValueStore object,
throws ValueError if len(vec) != self.rank."""
vec = self.feature_vecs[0]
self._rank_error(self.hhenl.delete_vector)
self.assertRaises(KeyError, self.hhenl.delete_vector, *[vec, 'non_existent_id'])
def test_add_vector_1(self):
"""Adds 'vec' both to self.hhenl.kvstore, and to exactly one chamber
of each underlying HyperplaneHasher object. Subsequently, the lists of keys of
vectors in the objects self.hhenl.kvstore and self.hhenl.hhs[i].kvstore
are identical, for all i."""
vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
vec_id = 'new'
self.hhenl.add_vector(vec, vec_id)
self.assertTrue((self.hhenl.get_vector(vec_id)==vec).all())
all_vec_ids = self.hhenl.get_vector_ids()
self.assertIn(vec_id, all_vec_ids)
for hh in self.hhenl.hhs:
chamber_id = hh.get_chamber_id(vec)
self.assertIn(vec_id, hh.chamber_labels(chamber_id))
def test_add_vector_2(self):
"""Throws ValueError if len(vec) != self.rank."""
self._rank_error(self.hhenl.add_vector)
def test_bulk_add_vector_1(self):
"""Throws ValueError if len(vec) != self.rank for vec in vecs."""
self._bulk_rank_error(self.hhenl.bulk_add_vector)
def test_bulk_add_vector_2(self):
"""Throws ValueError if len(vec) != self.rank for vec in vecs."""
self._bulk_list_length_error(self.hhenl.bulk_add_vector)
def _check_new_vec_ids_added(self, hhenl, vecs, vec_ids):
"""The set theoretic difference between hhenl.get_vector_ids_post and
self.hhenl.get_vector_ids_pre is equal to the set-theoretic difference
between set(vec_ids) and self.hhenl.get_vector_ids_pre."""
ids_pre = self.hhenl.get_vector_ids()
expected_diff = set(vec_ids).difference(ids_pre)
self.hhenl.bulk_add_vector(vecs, vec_ids)
ids_post = self.hhenl.get_vector_ids()
actual_diff = ids_post.difference(ids_pre)
return (actual_diff, expected_diff)
def test_bulk_add_vector_3(self):
"""The set theoretic difference between self.hhenl.get_vector_ids_post and
self.hhenl.get_vector_ids_pre is equal to the set of new vector ids."""
vecs = self.feature_vecs[:10]
vec_ids = self.letters[:10]
new_vec_ids = self.letters[5:15]
actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
self.assertEqual(actual_diff, expected_diff)
actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, new_vec_ids)
self.assertEqual(actual_diff, expected_diff)
def test_bulk_add_vector_4(self):
"""The method is idempotent."""
vecs = self.feature_vecs[:10]
vec_ids = self.letters[:10]
_, _ = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
actual_diff, expected_diff = self._check_new_vec_ids_added(self.hhenl, vecs, vec_ids)
self.assertEqual(actual_diff, set())
self.assertEqual(expected_diff, set())
def _chamberwise_compare(self, hhenl_1, hhenl_2):
"""Check that the chambers of all hh objects attached to each
of hhenl_1 and hhenl_2 contain the same labels."""
for hh_1, hh_2 in zip(hhenl_1.hhs, hhenl_2.hhs):
hh_1_ids, hh_2_ids = hh_1.get_chamber_ids(), hh_2.get_chamber_ids()
self.assertEqual(self._get_all_hh_labels(hh_1), self._get_all_hh_labels(hh_2))
self.assertEqual(hh_1_ids, hh_2_ids)
for ch_id_1, ch_id_2 in zip(hh_1_ids, hh_2_ids):
print('Bulk labels')
print(hh_1.chamber_labels(ch_id_1))
print('Serial labels')
print(hh_2.chamber_labels(ch_id_2))
self.assertEqual(hh_1.chamber_labels(ch_id_1), hh_2.chamber_labels(ch_id_2))
print('\n')
def _delete_all_vectors(self, hhenl):
"""Calls delete_vector(vec_id) for every vec_id."""
vec_ids = hhenl.get_vector_ids()
vecs = [hhenl.get_vector(vec_id) for vec_id in vec_ids]
for vec, vec_id in zip(vecs, vec_ids):
hhenl.delete_vector(vec, vec_id)
def _create_hhs_chamber_label_list(self, hhenl):
"""Returns a list [ch_label_list_1, ..., chamber_label_list_n] of lists,
where ch_label_list_i is the list of pairs (chamber_id, labels) associated
to the i-th HyperplaneHasher object in hhenl, and labels is the set of labels
in chamber chamber_id."""
hhs_ch_label_list = []
for hh in hhenl.hhs:
ch_ids = list(hh.get_chamber_ids())
ch_ids.sort()
ch_label_list = [(ch_id, hh.chamber_labels(ch_id)) for ch_id in ch_ids]
hhs_ch_label_list.append(ch_label_list)
return hhs_ch_label_list
def test_bulk_add_vector_5(self):
"""Calling the method on (vecs, vec_ids) has the same effect as
calling add_vector(vec, vec_id), for all (vec, vec_id) in
zip(vecs, vec_ids)."""
vecs = self.feature_vecs[:10]
vec_ids = self.letters[:10]
hhenl = self._create_hhenl()
hhenl.bulk_add_vector(vecs, vec_ids)
list_bulk = self._create_hhs_chamber_label_list(hhenl)
self._delete_all_vectors(hhenl)
for vec, vec_id in zip(vecs, vec_ids):
hhenl.add_vector(vec, vec_id)
list_serial = self._create_hhs_chamber_label_list(hhenl)
self.assertEqual(list_bulk, list_serial)
def test_find_neighbours_1(self):
"""Returns a pandas series of length 'num_neighbours', indexed
by keys that can successfully be passed to the get_vector() method.
The entries of 'ser' are non-negative real numbers, in ascending order.
If the input vector is known to the underlying KeyValueStore object,
then the first entry has value 0.0 and key == 'vec_id', where 'vec_id'
is the id of the input vector."""
vec = HyperplaneHasher._random_vectors(1, self.hhenl.rank)[0]
nn = 10
neighbours = self.hhenl.find_neighbours(vec, nn)
self.assertIsInstance(neighbours, pd.Series)
self.assertEqual(len(neighbours), nn)
self.assertTrue((neighbours == neighbours.order()).all())
for i in range(len(neighbours)):
self.assertGreaterEqual(neighbours[i], 0.0)
def test_find_neighbours_2(self):
"""If input vector 'vec' is known to underlying KeyValueStore object,
then first entry of output has value 0.0 and key 'vec_id', the id of 'vec'."""
vec = self.feature_vecs[0]
vec_id = self.feature_vecs_ids[0]
nn = 10
neighbours = self.hhenl.find_neighbours(vec, nn)
self.assertEqual(neighbours.iloc[0], 0.0)
self.assertEqual(neighbours.index[0], vec_id)
def test_find_neighbours_3(self):
"""Throws ValueError if len(vec) != self.rank."""
self._rank_error(self.hhenl.find_neighbours)
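if __name__ == '__main__':
    # Not present in the excerpt above; added on the assumption that this module
    # follows the other test modules in this dump and can be run directly.
    unittest.main()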
| mit |
mattilyra/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
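# Illustrative sketch (not part of the original test module) of the API under
# test: sample_without_replacement returns n_samples distinct integers drawn
# from range(n_population).
def example_sample_without_replacement():
    s = sample_without_replacement(n_population=10, n_samples=3, random_state=0)
    assert len(s) == 3
    assert len(np.unique(s)) == 3
    assert np.all(s < 10)
    return s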
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
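# Illustrative sketch (not part of the original tests), mirroring the calls
# above: random_choice_csc is expected to return a sparse CSC matrix with
# n_samples rows and one column per entry of `classes`.
def example_random_choice_csc():
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    got = random_choice_csc(100, classes, class_probabilites, random_state=0)
    assert sp.issparse(got)
    assert got.shape == (100, len(classes))
    return got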
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cocoaagg.py | 70 | 8970 | from __future__ import division
"""
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
import os, sys
try:
import objc
except:
print >>sys.stderr, 'The CocoaAgg backend requires PyObjC to be installed!'
print >>sys.stderr, ' (currently testing v1.3.7)'
sys.exit()
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase
from backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasCocoaAgg(thisFig)
return FigureManagerCocoaAgg(canvas, num)
def show():
for manager in Gcf.get_all_fig_managers():
manager.show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(0,0),'','','',''), # Image data
w, # width
h, # height
8, # bits per sample
4, # samples per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_press_event(loc.x, loc.y, button)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print >>sys.stderr, 'Unknown mouse event type:', type
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print >>sys.stderr, 'Unable to load Matplotlib Cocoa UI!'
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
FigureManager = FigureManagerCocoaAgg
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( u'GetCurrentProcess', S(OSErr, OUTPSN) ),
( u'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( u'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( u'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, unicode):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print >>sys.stderr, 'ApplicationServices missing'
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print >>sys.stderr, 'Missing', fn
return False
err, psn = d['GetCurrentProcess']()
if err:
print >>sys.stderr, 'GetCurrentProcess', (err, psn)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print >>sys.stderr, 'CPSSetProcessName', (err, psn)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print >>sys.stderr, 'SetFrontProcess', (err, psn)
return False
return True
| agpl-3.0 |
herberthudson/pynance | pynance/tst/tech/test_movave.py | 1 | 1535 | """
Copyright (c) 2014 Marshall Farrier
license http://opensource.org/licenses/MIT
@author: Marshall Farrier
@contact: [email protected]
@since: 2014-11-02
@summary: Unit tests for data module
"""
import unittest
import numpy as np
import pandas as pd
import pynance as pn
class TestData(unittest.TestCase):
def setUp(self):
session_dates = [
'2014-01-06',
'2014-01-07',
'2014-01-08',
'2014-01-09',
'2014-01-10',
'2014-01-13',
'2014-01-14',
'2014-01-15',
'2014-01-16',
'2014-01-17']
self.equity_data = pd.DataFrame(np.arange(1., 21.).reshape((10, 2)), index=session_dates,
columns=['Volume', 'Adj Close'])
self.equity_data.index.name = 'Date'
def test_ema(self):
_span = 4
_emadf = pn.tech.ema(self.equity_data.loc[:, 'Adj Close'], span=_span)
_prev = _emadf.iloc[0, 0]
# values are increasing but smaller than df values
for i in range(1, len(_emadf.index)):
self.assertTrue(_emadf.iloc[i, 0] < self.equity_data.iloc[i, 1])
self.assertTrue(_emadf.iloc[i, 0] > _prev)
_prev = _emadf.iloc[i, 0]
# values are greater than 2 datapoints back in equity_data
for i in range(2, len(_emadf.index)):
self.assertTrue(_emadf.iloc[i, 0] > self.equity_data.iloc[i - 2, 1])
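# Illustrative reference only (not part of the original tests): the properties
# asserted above are those of a standard exponential moving average. Assuming
# pandas >= 0.18, an equivalent smoothing for the same span would be:
def reference_ema(prices, span=4):
    # alpha = 2 / (span + 1), the usual span-based EMA convention
    return prices.ewm(span=span, adjust=False).mean()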
if __name__ == '__main__':
unittest.main()
| mit |
apbard/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
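# Illustrative addition (not part of the original example): the two bandwidth
# rules can also be compared numerically via each estimator's `factor` attribute.
print("Normal data -- Scott factor: %.3f, Silverman factor: %.3f"
      % (kde1.factor, kde2.factor))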
plt.show()
| bsd-3-clause |
bloyl/mne-python | mne/externals/tqdm/_tqdm/std.py | 14 | 55471 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, _text_width, \
Comparable, RE_ANSI, _is_ascii, FormatReplace, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
BLANK = " "
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
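# Illustrative sketch (not part of the original source): a `Bar` is rendered via
# str.format using the `[width][type]` spec documented above.
def _bar_render_example():
    # A ten-column ASCII bar that is half full: '#####     '
    return '{0:10a}'.format(Bar(0.5))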
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
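    # Examples (illustrative, not part of the original source):
    #   tqdm.format_interval(75)   -> '01:15'
    #   tqdm.format_interval(3661) -> '1:01:01'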
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
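    # Example (illustrative, not part of the original source): successive calls
    # feed the previous EMA back in as `mu`, e.g.
    #   mu = tqdm.ema(10)            # -> 10   (no previous value yet)
    #   mu = tqdm.ema(0, mu, 0.3)    # -> 7.0  (0.3*0 + 0.7*10)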
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.BLANK)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
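        # Move the cursor down `n` lines (or up `-n` lines if `n` is negative)
        # so that output is written on the row belonging to this bar.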
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| bsd-3-clause |
MoonRaker/cons2-python | cons2/cu.py | 1 | 19734 | # crop.py
# Derek Groenendyk
# 5/3/2016
# Crop classes for xcons program
"""
"""
from datetime import datetime as dt
from datetime import date
from itertools import islice
import logging
import numpy as np
import os
import pandas as pd
import pickle as pkl
logger_fn = logging.getLogger('crop_functions')
logger_fn.setLevel(logging.DEBUG)
logger_cu = logging.getLogger('crop_cu')
class CONSUMPTIVE_USE(object):
"""docstring for CONSUMPTIVE_USE"""
def __init__(self, sp, cp):
self.sp = sp
self.cp = cp
if self.sp.et_method == 'scs':
self.ckc = self.cp.ckc
self.nckc = self.cp.nckc
if self.cp.crop_type == 'ANNUAL':
self.mmnum = self.cp.mmnum
self.ngrwpts = 21
elif self.cp.crop_type == 'PERENNIAL':
self.mmnum = 7
self.ngrwpts = 25
self.grow_dates = {}
self.nyrs = len(self.sp.yrs)
wx_years = list(set(self.sp.wx.data.index.year))
# if (not np.all(wx_years == self.sp.yrs)) or \
if (len(wx_years) < self.nyrs):
            logger_cu.warning('Missing years of weather data.')
logger_cu.info('completed init: ' + self.cp.sname)
def calc_temp(self):
self.temps, self.days = self.mmtemp(self.nbegmo,
self.midpts,
self.npart,
self.mmnum)
# might create an issues when they are equal, should probably
# set tempf and dayf to zero. DGG 5/10/2016
if self.nbegmo != self.nendmo:
self.tempf, self.dayf = self.mmtemp(self.nendmo,
self.midptf,
self.nendda,
12)
def set_dates(self):
if self.sp.et_method == 'scs':
atemps = self.sp.wx.data['temp_avg']
self.get_dates(atemps)
elif self.sp.et_method == 'fao':
self.beg = self.cp.beg
self.end = self.cp.end
# self.beg = [self.jln(self.cp.mbegmo, self.cp.mbegda)] * len(self.sp.yrs)
# self.end = [self.jln(self.cp.mendmo, self.cp.mendda)] * len(self.sp.yrs)
def calc_dates(self):
self.nbeg = self.beg[self.yr]
self.nend = self.end[self.yr]
# print(self.nbeg, self.nend, self.cp.sname)
# if self.nend - self.nbeg + 1 > self.cp.ngrows:
# self.nend = self.nbeg + self.cp.ngrows - 1
dates = {}
dates['nbegmo'], dates['nbegda'] = self.clndr(self.nbeg)
dates['nendmo'], dates['nendda'] = self.clndr(self.nend)
# print(dates,self.cp.sname)
self.nbegmo = dates['nbegmo']
self.nbegda = dates['nbegda']
self.nendmo = dates['nendmo']
self.nendda = dates['nendda']
self.grow_dates[self.sp.yrs[self.yr]] = dates
if self.nbeg > self.nend:
            logger_cu.warning('Beginning date is after ending ' + \
                              'day for crop: ' + self.cp.sname)
self.calc_midpts()
if self.cp.crop_type == 'ANNUAL':
# winter wheat
if self.nbegmo == self.nendmo:
self.midpts = ((self.nendda - self.nbegda + 1) / 2.) + self.nbegda
elif self.cp.crop_type == 'PERENNIAL':
# Julian spring midpoint of part month
self.midspr = self.nbeg - self.nbegda + self.midpts
self.midfal = self.nend - self.midptf
def calc_kc(self):
if self.cp.crop_type == 'ANNUAL':
self.fake = self.nend - self.nbeg
self.naccum[self.nbegmo-1] = self.jln(self.nbegmo, self.midpts) - self.nbeg
self.nperct[self.nbegmo-1] = (self.naccum[self.nbegmo-1] / \
self.fake) * 100.0
self.midspr = self.nperct[self.nbegmo-1]
self.midfal = self.nperct[self.nendmo-1]
# Interpolate KC for part Spring month
xkc, xf, xkt = self.interp_kc(self.midspr, self.temps, self.days)
self.xkc[self.nbegmo - 1] = xkc
self.xf[self.nbegmo - 1] = xf
self.xkt[self.nbegmo - 1] = xkt
# KC computation for all months
if self.cp.crop_type == 'ANNUAL':
# not winter wheat
if self.nbegmo != self.nendmo:
# not winter wheat
if self.nbegmo != self.nendmo - 1:
self.kc_ann()
self.naccum[self.nendmo-1] = self.nend - self.nbeg - \
(self.nendda + 1)/2.
self.nperct[self.nendmo-1] = (self.naccum[self.nendmo-1] / \
self.fake) * 100.0
elif self.cp.crop_type == 'PERENNIAL':
self.kc_per()
# Interpolate KC for part Fall month
xkc, xf, xkt = self.interp_kc(self.midfal, self.tempf, self.dayf)
self.xkc[self.nendmo - 1] = xkc
self.xf[self.nendmo - 1] = xf
self.xkt[self.nendmo - 1] = xkt
def kc_ann(self):
for k in range(self.nbegmo, self.nendmo-1):
self.naccum[k] = self.jln(k+1, 15) - self.nbeg
self.nperct[k] = (self.naccum[k] / self.fake) * 100.0
flag = True
for j in range(self.ngrwpts):
if self.nckc[j] > self.nperct[k]:
self.xkc[k] = self.ckc[j-1] + (self.ckc[j] - \
self.ckc[j-1]) * ((self.nperct[k] - \
self.nckc[j-1]) / (self.nckc[j] - \
self.nckc[j-1]))
flag = False
break
elif self.nckc[j] == self.nperct[k]:
self.xkc[k] = self.ckc[j]
flag = False
break
if flag:
self.xkc[k] = self.ckc[j-1] + (self.ckc[j] - \
self.ckc[j-1]) * ((self.nperct[k] - \
self.nckc[j-1]) / (self.nckc[j] - \
self.nckc[j-1]))
self.xf[k] = self.atemps[k] * self.sp.pclite[k] / 100.
if self.atemps[k] < 36.0:
self.xkt[k] = 0.3
else:
self.xkt[k] = 0.0173 * self.atemps[k] - 0.314
def kc_per(self):
mid = 15
for k in range(self.nbegmo, self.nendmo-1):
flag = True
for j in range(self.ngrwpts):
if self.nckc[j] == self.jln(k+1, mid):
self.xkc[k] = self.ckc[j]
flag = False
break
if flag:
logger_cu.warning('kc not found')
self.xf[k] = self.atemps[k] * self.sp.pclite[k] \
/ 100.
if self.atemps[k] < 36.0:
self.xkt[k] = 0.3
else:
self.xkt[k] = 0.0173 * self.atemps[k] - 0.314
def calc_cu(self):
self.set_dates()
self.pcu = np.zeros((self.nyrs, 13))
self.pre = np.zeros((self.nyrs, 13))
self.pcuirr = np.zeros((self.nyrs, 13))
self.ppcu = np.zeros((self.nyrs, 13))
self.ppre = np.zeros((self.nyrs, 13))
self.pcuir = np.zeros((self.nyrs, 13))
self.adjureq = np.zeros((self.nyrs))
self.winprep = np.zeros((self.nyrs))
for yr in range(self.nyrs):
self.yr = yr
year = self.sp.wx.data.index.year
self.atemps = self.sp.wx.data[year == \
self.sp.yrs[self.yr]].temp_avg.values
precip = self.sp.wx.data[year == self.sp.yrs[yr]].precipitation.values
self.calc_dates()
self.cu = np.zeros(13)
self.nperct = np.zeros(12, dtype=np.int32)
self.naccum = np.zeros(12, dtype=np.int32)
if self.sp.et_method == 'scs':
self.calc_temp()
self.xf = np.zeros(12)
self.xkt = np.zeros(12)
self.xkc = np.zeros(12)
self.calc_kc()
self.cu[:12] = self.xf * self.xkt * self.xkc
elif self.sp.et_method == 'fao':
self.cu[:12] = self.calc_fao(yr)
re, cuirr = self.calc_effprecip(precip)
self.ppcu[yr] = self.cu
self.ppre[yr] = re
self.pcuir[yr] = cuirr
logger_cu.info('finished cu calculation')
def spring(self, atemp, mean):
"""
Find beginning of growing season day of year.
Parameters
----------
        atemp: array-like
            Monthly mean temperatures
mean: float
Critical spring growing season temperature
Returns
-------
jdays: integer
Julian day
"""
if atemp[6] >= mean:
for j in range(0, 7):
i = 6 - j
if atemp[i] <= mean:
break
try:
idiff = 30 * (mean - atemp[i]) / (atemp[i+1] - atemp[i])
except ZeroDivisionError:
idiff = 0
if idiff > 15 and idiff < 31:
month = i + 2
kdays = idiff - 15
elif idiff <= 15 and idiff > 0:
month = i + 1
kdays = idiff + 15
elif idiff >= 31:
month = i + 2
kdays = 15
elif idiff <= 0:
month = i + 1
kdays = 15
else:
month = 7
kdays = 15
kdays = int(round(kdays))
# try:
# kdays = int(round(kdays))
# except OverflowError:
# logger_fn.critical('Invalid kdays value')
# logger_fn.critical('', mean, atemp[i], atemp[i]+1)
# raise
if kdays < 1:
kdays = 1
if month == 2 and kdays > 28:
kdays = 28
# yday = d.toordinal() - date(d.year, 1, 1).toordinal() + 1
jdays = dt(2015, int(month), round(kdays)).timetuple().tm_yday
return jdays
def fall(self, atemp, mean):
"""
Find end of growing season day of year.
Parameters
----------
        atemp: array-like
            Monthly mean temperatures
        mean: float
            Critical fall growing season temperature
Returns
-------
jdays: integer
Julian day
"""
flag = True
for i in range(6, 11):
if atemp[i + 1] < mean:
try:
idiff = 30 * (atemp[i] - mean) / (atemp[i] - atemp[i+1])
except ZeroDivisionError:
idiff = 0
if idiff > 15 and idiff < 31:
month = i + 2
kdays = idiff - 15
elif idiff <= 15 and idiff > 0:
month = i + 1
kdays = idiff + 15
elif idiff >= 31:
month = i + 2
kdays = 15
elif idiff <= 0:
month = i + 1
kdays = 15
flag = False
break
if flag:
month = 12
kdays = 15
kdays = int(round(kdays))
if kdays < 1:
kdays = 1
if month == 2 and kdays > 28:
kdays = 28
jdays = dt(2015, month, kdays).timetuple().tm_yday
return jdays
def clndr(self, doy):
"""
        Convert a day of year into month and day.
Parameters
----------
doy: integer
Julian day of the year
Returns
-------
month: integer
Month of the year
day: integer
Day of the month
"""
ordinal = date.toordinal(date(2015, 1, 1)) + int(doy) - 1
adate = date.fromordinal(ordinal).timetuple()
month = int(adate[1])
day = int(adate[2])
return month, day
def jln(self, m, d):
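        """Convert month and day to day of year, using the same non-leap
        reference year (2015) as `clndr`; e.g. jln(4, 10) == 100 and
        clndr(100) == (4, 10)."""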
return dt(2015, m, d).timetuple().tm_yday
def get_dates(self, temps):
"""
        Find the start and end of the
        crop growing season in Julian days.
Parameters
----------
temps: float
Monthly mean Spring temperature
years: list
List of integer years
nbtemp: float
Temperature when the growing season begins
netemp: float
Temperature when the growing season ends
mbegmo: integer
Earliest month season can begin
mbegda: integer
Earliest day season can begin
mendmo: integer
Latest month season can end
mendda: integer
Latest day season can end
Returns
-------
nbeg: integer
Julian day
nend: integer
Julian day
"""
nyrs = len(self.sp.yrs)
self.beg = np.zeros((nyrs), dtype=np.int32)
self.end = np.zeros((nyrs), dtype=np.int32)
# mbeg = self.jln(self.cp.mbegmo, self.cp.mbegda)
# mend = self.jln(self.cp.mendmo, self.cp.mendda)
for yr in range(nyrs):
mbeg = self.jln(self.cp.mbegmo[yr], self.cp.mbegda[yr])
mend = self.jln(self.cp.mendmo[yr], self.cp.mendda[yr])
if mend <= mbeg:
# print(mbeg, mend)
mbeg = mbeg - mend + 1
mend = 365 - mend + 1
atemp = temps[temps.index.year == self.sp.yrs[yr]].values
nstart = self.spring(atemp, self.cp.nbtemp)
self.beg[yr] = nstart
# winter wheat spring
if (nstart - mbeg) <= 0.:
self.beg[yr] = mbeg
mend = self.beg[yr] + self.cp.ngrows
# if 'wheat' in self.cp.sname:
# print(mend)
if mend > 365:
if self.beg[yr] > (mend - 365) - 1:
self.beg[yr] -= (mend - 365) - 1
mend = 365 - (mend - 365)
else:
mend = 365
kend = self.fall(atemp, self.cp.netemp)
self.end[yr] = kend
# winter wheat fall
if (kend - mend) >= 0.:
self.end[yr] = mend
# self.end[yr] = self.beg[yr] + self.cp.ngrows
# if 'wheat' in self.cp.sname.lower():\
# print(self.beg[yr], self.end[yr], mbeg, mend, kend)
if self.beg[yr] > self.end[yr]:
                logger_fn.critical('beginning date is after ending date for crop ' + self.cp.sname)
                raise ValueError('beginning date is after ending date for crop ' + self.cp.sname)
def calc_midpts(self):
"""
Find midpoints of seasons
Parameters
----------
nbegmo: integer
Month growing season begins
nbegda: integer
Day of the month for the beginning of the growing season
nendda:
Day of the month for the end of the growing season
Returns
-------
npart: integer
First part of first month of the season
midpts: integer
Midpoint of the Spring month of the season
midptf: integer
Midpoint of the Fall month of the season
"""
month = [31,28,31,30,31,30,31,31,30,31,30,31]
self.npart = month[self.nbegmo-1] - self.nbegda + 1
self.midpts = int(self.npart / 2. + self.nbegda)
self.midptf = int((self.nendda + 1) / 2.)
def mmtemp(self, nmo, midpt, day2, num):
"""
        Calculates Spring part-month mean temperature
Parameters
----------
nmo: integer
Month number
midpt: integer
Midpoint day of the month
day2: integer
Day of month
num: integer
Number of months to use
atemps: list
List of temperatures for the year
pclite: list
List of fraction light for each month
middle: list
Middle day for each month
Returns
-------
        temp: float
            Mean temperature interpolated at the part-month midpoint
        day: float
            Daylight-percentage (pclite) value for the part month
"""
middle = [16, 45, 75, 105, 136, 166, 197, 228, 258, 289, 319, 350]
for k in range(num):
# logger_fn.info(str(self.jln(nmo, midpt)) + ', ' + str(middle[k]))
if self.jln(nmo, midpt) < middle[k]:
temp = self.midtemp(nmo, midpt, k, self.atemps, middle)
day = self.midday(nmo, midpt, day2, k, self.sp.pclite, middle)
return temp, day
elif self.jln(nmo, midpt) == middle[k]:
temp = self.atemps[k]
day = self.sp.pclite[k]
return temp, day
logger_fn.warn("mean monthly temperature not found, month can't be found.")
def midtemp(self, nmo, midpt, k, temp, middle):
"""
Calculates mean monthly temperature
Parameters
----------
"""
day1 = self.jln(nmo, midpt)
return temp[k-1] + ((day1 - middle[k-1]) / (middle[k] - \
middle[k-1])) * (temp[k] - temp[k-1])
def midday(self, nmo, midpt, day2, k, pclite, middle):
month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
day1 = self.jln(nmo, midpt)
return (pclite[k-1] + ((day1 - middle[k-1]) / (middle[k] \
- middle[k-1])) * (pclite[k] - pclite[k-1])) * (day2 / month[nmo-1])
def interp_kc(self, mid, temp, day):
flag = True
for k in range(self.ngrwpts):
if self.nckc[k] > mid:
xkc = self.ckc[k-1] + (self.ckc[k] - self.ckc[k-1]) * ((mid - self.nckc[k-1]) \
/ (self.nckc[k] - self.nckc[k-1]))
flag = False
break
elif self.nckc[k] == mid:
                xkc = self.ckc[k]
flag = False
break
if flag:
xkc = self.ckc[k-1] + (self.ckc[k] - self.ckc[k-1]) * ((mid - self.nckc[k-1]) \
/ (self.nckc[k] - self.nckc[k-1]))
xf = temp * day / 100.
if temp < 36.:
xkt = 0.3
else:
xkt = 0.0173 * temp - 0.314
return xkc, xf, xkt
def calc_effprecip(self, precip):
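        # Monthly effective rainfall (re) and net irrigation requirement
        # (cuirr = cu - re), with part-month adjustments for the first and
        # last months of the season; index 12 accumulates the season totals.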
month = [31,28,31,30,31,30,31,31,30,31,30,31]
if self.sp.apdep == 0.:
f = 1.0
elif self.sp.apdep > 0.:
f = 0.531747 + 0.295164 * self.sp.apdep - 0.057697 * self.sp.apdep**2 + 0.003804 * \
self.sp.apdep**3
re = np.zeros((13))
cuirr = np.zeros((13))
for k in range(12):
cu_temp = self.cu[k]
if self.sp.npre != 2:
# verify this calculation - DGG 2/13/2017
if k == self.nbegmo-1:
cu_temp *= month[self.nbegmo-1] / self.npart
if k == self.nendmo-1:
cu_temp *= month[self.nendmo-1] / self.nendda
re[k] = (0.70917 * precip[k]**0.82416 - 0.11556) * f * \
10**(0.02426 * cu_temp)
if k == self.nbegmo-1:
cu_temp *= self.npart / month[self.nbegmo-1]
if k == self.nendmo-1:
cu_temp *= self.nendda / month[self.nendmo-1]
else:
if precip[k] <= 1.0:
re[k] = precip[k] * 0.95
elif precip[k] <= 2.0:
re[k] = ((precip[k] - 1.0) * 0.9) + 0.95
elif precip[k] <= 3.0:
re[k] = ((precip[k] - 2.0) * 0.82) + 1.85
elif precip[k] <= 4.0:
re[k] = ((precip[k] - 3.0) * 0.65) + 2.67
elif precip[k] <= 5.0:
re[k] = ((precip[k] - 4.0) * 0.45) + 3.32
elif precip[k] <= 6.0:
re[k] = ((precip[k] - 1.0) * 0.05) + 4.02
if re[k] < 0.0:
re[k] = 0.0
if re[k] > precip[k]:
re[k] = precip[k]
x = month[k]
if k == self.nbegmo-1:
re[k] = ((x - self.nbegda + 1.0) / x) * re[k]
if k == self.nendmo-1:
re[k] = (self.nendda / x) * re[k]
# winter wheat, double check DGG 4/3/2017
if self.nbegmo-1 == k and self.nendmo-1 == k:
                re[k] = ((self.nendda - self.nbegmo + 1) / x) * re[k]
if re[k] > cu_temp:
re[k] = cu_temp
cuirr[k] = cu_temp - re[k]
self.cu[12] += cu_temp
re[12] += re[k]
cuirr[12] += cuirr[k]
return re, cuirr
def calc_adj(self, cuirr, pccrop, precip, nbegmo, day3):
month = [31,28,31,30,31,30,31,31,30,31,30,31]
smosadj = 0.0
for i in range(nbegmo - 1):
smosadj += precip[i]
smosadj += precip[nbegmo] * (month[nbegmo-1] - day3) / month[nbegmo - 1]
if smosadj > 3.0:
smosadj = 3.0
if (cuirr[-1] - smosadj) < 0.0:
smosadj = cuirr[-1]
adjureq = (cuirr[-1] - smosadj) * pccrop / 100.0
# winprep = smosadj * precip / 100.0 # why? what is the purpose?
winprep = 0.0
return adjureq, winprep
def fiveyr_avg(self, yr, fivc, fivcr, pcu, pcuirr):
fivcu = fivc[yr-4] + fivc[yr-3] + fivc[yr-2] + fivc[yr-1] + fivc[yr]
fivci = fivcr[yr-4] + fivcr[yr-3] + fivcr[yr-2] + fivcr[yr-1] + fivcr[yr]
fvcup = pcu[yr-4, -1] + pcu[yr-3, -1] + pcu[yr-2, -1] + pcu[yr-1, -1] + \
pcu[yr, -1]
fvcip = pcuirr[yr - 4, -1] + pcuirr[yr - 3, -1] + pcuirr[yr - 2, -1] + \
pcuirr[yr - 1, -1] + pcuirr[yr, -1]
        return fivcu, fivci, fvcup, fvcip
def calc_fao(self, yr, repeat=1):
lfrac = self.cp.stages[self.cp.stype]
kc_vals = self.cp.kc[self.cp.kcnum]
start_date = dt(self.sp.yrs[yr], self.nbegmo, self.nbegda)
ETo = np.zeros((12))
Kc = np.zeros((12))
pclite = self.calc_pclite(self.sp.latitude)
for k in range(repeat):
date_rng = pd.date_range(start_date, periods=self.cp.ngrows, freq='D')
months = sorted(list(set(date_rng.month)))
total_days = 1.0
for i in range(len(months)):
num_modays = len(np.nonzero(date_rng.month == months[i])[0])
ind = str(self.sp.yrs[yr]) + '-' + str(months[i])
kc_total = 0
for aday in range(num_modays):
nday = total_days + float(aday)
kc_total += self.calc_faokc(nday/float(self.cp.ngrows), lfrac, kc_vals)
temp_Kc = kc_total/num_modays
p = pclite[months[i]-1]
temp_ETo = self.fao_cu(p, self.atemps[months[i]-1])
temp_ETo *= num_modays
total_days += num_modays
ETo[months[i]-1] += temp_ETo
if Kc[months[i]-1] != 0.:
Kc[months[i]-1] = (Kc[months[i]-1] + temp_Kc)/2
else:
Kc[months[i]-1] = temp_Kc
start_date = date_rng[-1]+1
ETc = ETo*Kc/25.4
return ETc
def calc_pclite(self, lat):
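        # `pclite` is the Blaney-Criddle daylight percentage p (mean daily
        # percentage of annual daytime hours), tabulated at 30 and 35 degrees
        # latitude and interpolated linearly in between; e.g. lat 32.5 gives
        # the element-wise mean of the two rows (0.235 for January).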
pclite_dict = {}
pclite_dict[30] = np.array([.24, .25, .27, .29, .31, .32, .31, .30, .28, .26, .24, .23])
pclite_dict[35] = np.array([.23, .25, .27, .29, .31, .32, .32, .30, .28, .25, .23, .22])
if lat > 35 or lat < 30:
logger_fn.critical("Latitude out of bounds for pclite. 30* <= lat <= 35* ")
raise
if lat in pclite_dict.keys():
return pclite_dict[lat]
else:
dx = (35. - lat)/(35.-30.)
return pclite_dict[35] + dx*(pclite_dict[30] - pclite_dict[35])
def fao_cu(self, p, tavg):
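        # FAO-24 Blaney-Criddle style reference ET (sketch): ETo = a + b * f
        # with f = p * (0.46 * T + 8.13), T in Celsius; the a/b values below
        # assume low humidity, high sunshine and low wind. For p = 0.27 and a
        # 70 F monthly mean this gives roughly -2.60 + 1.55 * 4.82 ~= 4.9
        # (mm/day, consistent with the /25.4 inch conversion in calc_fao).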
tavg = (tavg - 32.0) / 1.8 # conversion to Celsius
f = p * (0.46 * tavg + 8.13)
# low humidity, high n/N, medium wind
# a = -2.30
# b = 1.82
# low humidity, high n/N, low wind
a = -2.60
b = 1.55
ETO = a + b * f
return ETO
def calc_faokc(self, frac, lfrac, kc):
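        # Piecewise-linear Kc curve over the growing season: `frac` is the
        # fraction of the season elapsed, `lfrac` the four stage-length
        # fractions and `kc` the (initial, mid, end) coefficients. Kc is held
        # at kc[0] in the initial stage, ramped up to kc[1] through
        # development, held at kc[1] at mid-season, then ramped down to kc[2]
        # in the late stage.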
if frac < lfrac[0]:
kc_aday = kc[0]
elif frac < np.sum(lfrac[:2]):
dx = np.sum(lfrac[:2]) - lfrac[0]
kc_aday = ((frac-lfrac[0]))*((kc[1] - kc[0])/dx) + kc[0]
elif frac < np.sum(lfrac[:3]):
kc_aday = kc[1]
elif frac <= np.sum(lfrac):
dx = np.sum(lfrac) - np.sum(lfrac[:3])
kc_aday = kc[1] - (frac-np.sum(lfrac[:3]))*((kc[1] - kc[2])/dx)
else: # accounts for inaccuracy in np.sum(), essentially kc_aday = kc[2]
dx = np.sum(lfrac) - np.sum(lfrac[:3])
kc_aday = kc[1] - (frac-np.sum(lfrac[:3]))*((kc[1] - kc[2])/dx)
return kc_aday
| gpl-3.0 |
JEB12345/Advancement_UCSC | Dissertation/tex/ASME-journal/results/testing/logs/script-python-analyzelogs.py | 6 | 5096 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import prettyplotlib as ppl
data = np.loadtxt("muscleRestLengths.csv", delimiter=",")[200:,1:] #discard first 2 seconds and time column
data_normalized = data-data.mean(0)
data_normalized /= data_normalized.std(0)
#plot box data
plt.figure()
plt.boxplot(data,0,'')
#from matplotlib.mlab import PCA
#r = PCA(data_normalized)
#plt.plot(r.fracs,'+-') #only four "principal" components!
#because there are only four steps in the signal!
#plt.figure()
#plt.title("PCA first 4 principal components")
#plt.plot(r.project(data_normalized)[:,:4]+8*np.arange(4))
#now do something useful
#signal periodicity is 400 BTW
#plt.figure()
#plt.plot(data_normalized+np.arange(24)*4,'blue')
#plt.plot(data_normalized[:,11]+11*4,'red',linewidth=4)
#plt.title('Actuator 11 is different!')
#plt.ylabel("muscleRestLengths")
#slow
ncc = np.zeros((data_normalized.shape[1],data_normalized.shape[1]))
for i in xrange(data_normalized.shape[1]):
for j in xrange(data_normalized.shape[1]):
ncc[i,j] = np.abs(np.correlate(data_normalized[:,i],data_normalized[:,j],mode='full')/(data_normalized.shape[0]-1)).max()
#most signals are highly correlated!!!
#let's look at the delays
ncc_delay = np.zeros((data_normalized.shape[1],data_normalized.shape[1]))
for i in xrange(data_normalized.shape[1]):
for j in xrange(data_normalized.shape[1]):
ncc_delay[i,j] = (np.correlate(data_normalized[:,i],data_normalized[:,j],mode='full')).argmax()-5800+1
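# note: the "-5800+1" term converts the argmax of the full-mode correlation,
# whose length is 2*5800-1 for these 5800-sample signals, into a signed lag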
#time for something simple: the average equilibrium length of each motor
#plt.figure()
#plt.plot(np.sort(data.mean(0)),'+-')
#they all seem different, so the algorithm hasn't learned a symmetric gait
#combine results
#plt.figure()
#plt.subplot(1,2,1)
#plt.imshow(ncc,interpolation='nearest',cmap=plt.cm.gray_r)
#plt.title("Normalized Cross-Correlation")
#plt.colorbar()
#plt.subplot(1,2,2)
#plt.imshow(np.where(np.abs(ncc_delay)%400>=200,400-np.abs(ncc_delay)%400,np.abs(ncc_delay)%400),interpolation='nearest',cmap=plt.cm.gray_r)
#plt.title("Optimal time delay")
#plt.colorbar()
#Let's time shift all the signals to align with signal 0
data_normalized_aligned = np.zeros(data.shape)
for i in xrange(24):
data_normalized_aligned[:,i] = (np.roll(data_normalized[:,i],int(ncc_delay[0,i])))
#THIS LOOKS FUNNY
#plt.figure()
#plt.plot(data_normalized_aligned)
#plt.title("time aligned normalized signals")
#plt.figure()
#for i in xrange(24):
# plt.plot(data_normalized[:,np.argsort(ncc_delay[0])[i]]+2*i)
#plt.title("signals ordered by time diff wrt signal 0, black = delay")
#for i in xrange(10):
# plt.plot(-np.sort(ncc_delay[0])+400*i,np.arange(24)*2,'black',linewidth=4)
#if we look at the PCA of this, then 98% of the variance is explained by the first 2 components!
#plt.figure()
#r2 = PCA(data_normalized_aligned)
#plt.plot(r2.project(data_normalized_aligned)[:,:2]+8*np.arange(2))
#you need a sign wave and a double frequency sign wave
#td = np.zeros(ncc.shape)
#for i in xrange(24):
# td[i,:] = np.sort((ncc_delay[i]+400+int(ncc_delay[0,i]))%800)
#plt.figure()
#plt.title("sequential activation of motors")
#plt.plot(td.T)
##Atil
import scipy.cluster.hierarchy as hclus
#do clustering using the correlation matrix # (1-ncc) or ncc doesn't matter
plt.figure()
linkage_matrix=hclus.linkage(1-ncc,method='ward');
dend = hclus.dendrogram(linkage_matrix,
color_threshold=0.3,
show_leaf_counts=True)
#order the correlation matrix according to the clustering
ncc_ordered = np.zeros((data_normalized.shape[1],data_normalized.shape[1]))
for i in xrange(data_normalized.shape[1]):
for j in xrange(data_normalized.shape[1]):
ncc_ordered[i,j]=ncc[dend['leaves'][i],j]
#show normalized cross correlation coeff
f, axarr = plt.subplots(1,2)
im1 = axarr[0].imshow(ncc,interpolation='nearest',cmap=plt.cm.gray_r)
im2 = axarr[1].imshow(ncc_ordered,interpolation='nearest',cmap=plt.cm.gray_r)
f.colorbar(im2)
#order signals according to the clustering
data_normalized_aligned_ordered = np.zeros(data.shape)
for i in xrange(24):
ii=dend['leaves'][i];
data_normalized_aligned_ordered[:,i] = data_normalized_aligned[:,ii]
f, axarr = plt.subplots(2,1)
im2 = axarr[0].plot(data_normalized+np.arange(24)*4,'blue')
im1 = axarr[1].plot(data_normalized_aligned_ordered+np.arange(24)*4,'blue')
f.axes[0].get_xaxis().set_visible(False)
f.axes[0].get_yaxis().set_visible(False)
f.axes[1].get_yaxis().set_visible(False)
#f.axes[1].text(0.3,0.3,'s\nd\nf\ng',fontsize=10)
for i in xrange(24):
axarr[0].annotate(i,xy=(5800,4*i),xytext=(0,0),textcoords='offset points',fontsize=10)
rect1 = matplotlib.patches.Rectangle((2350-ncc_delay[0,i],4*i-2), 1200, 4, color='red',alpha=0.2)
axarr[0].add_patch(rect1)
rect1 = matplotlib.patches.Rectangle((2750-ncc_delay[0,i],4*i-2), 400, 4, color='yellow')
axarr[0].add_patch(rect1)
axarr[1].annotate(dend['leaves'][i],xy=(5800,4*i),xytext=((i%1),0),textcoords='offset points',fontsize=10)
axarr[1].axvspan(2350,3550,color='red',alpha=0.2)
axarr[1].axvspan(2750,3150,color='yellow')
| mit |
theoryno3/scikit-learn | sklearn/decomposition/nmf.py | 24 | 19057 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
    eps : float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
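# Example (sketch): for a non-negative X of shape (n_samples, n_features),
#     W, H = _initialize_nmf(X, n_components=2)
# returns non-negative factors of shape (n_samples, 2) and (2, n_features)
# such that X is approximately W H.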
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
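# Example (sketch): given non-negative V (5x3), W (5x2) and an initial guess
# H0 (2x3), the subproblem min_H ||W H - V|| subject to H >= 0 can be solved by
#     H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)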
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
Git3251/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
if os.path.exists(path+'\\'+fName) and not forceDownload:
print 'File already downloaded, skipping'
return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr,path+'\\'+fName)
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
dricciardelli/vae2vec | capt_gen_o2e.py | 1 | 29294 | # -*- coding: utf-8 -*-
import math
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import pickle
import pickle as pkl
import cv2
import skimage
import tensorflow.python.platform
from tensorflow.python.ops import rnn
from keras.preprocessing import sequence
from collections import Counter
from collections import defaultdict
import itertools
test_image_path='./data/acoustic-guitar-player.jpg'
vgg_path='./data/vgg16-20160129.tfmodel'
n=2**19-3
def map_lambda():
return n+1
def rev_map_lambda():
return "<UNK>"
def load_text(n,capts,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
# _map,rev_map=get_one_hot_map(word_list,def_list,n)
_map=pkl.load(open('maps.pkl','rb'))
rev_map=pkl.load(open('rev_maps.pkl','rb'))
if num_samples is not None:
num_samples=len(capts)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# y = (36665, 56210)
# print _map
# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
# np.save('X',X)
# np.save('y',y)
# np.save('mask',mask)
X=np.load('Xs.npy','r')
y=np.load('yc.npy','r')
mask=np.load('maskc.npy','r')
print (np.max(y))
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(lambda :n+1)
rev_map=defaultdict(lambda:"<UNK>")
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
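# Assign each word an integer code by enumerating binary_dim-bit patterns in
# order of increasing number of set bits, so words earlier in the list (the
# defined words first, then corpus words by descending frequency) receive the
# sparsest bit patterns; rev_map inverts the assignment for decoding.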
for num_bits in range(binary_dim):
for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
bitmap=np.zeros(binary_dim)
bitmap[np.array(bit_config)]=1
num=bitmap*(2** np.arange(binary_dim ))
num=np.sum(num).astype(np.uint32)
word=words[i]
_map[word]=num
rev_map[num]=word
i+=1
if i>=len(words):
break
if i>=len(words):
break
# for word in words:
# i+=1
# _map[word]=i
# rev_map[i]=word
rev_map[n+1]='<UNK>'
if zero_end_tok:
rev_map[0]='.'
else:
rev_map[0]='Start'
rev_map[n+2]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
for word in corpus:
mapped=_map[word]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
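# Two modes: when maxlen == 1 each entry of `corpus` is a single word and one
# vector per entry is returned (one-hot or binary-coded depending on the
# `onehot`/`form2` globals); otherwise each entry is a sentence and the output
# is a padded [len(corpus), maxlen+2] array of codes plus a 0/1 mask over the
# valid timesteps, with slot 0 reserved for the start symbol and the slot
# after the last word holding the end token.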
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
if not onehot:
rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32)
for l,line in enumerate(corpus):
# if len(line)==0:
# rtn[l]=n+2
# else:
# if line not in _map:
# total_not+=1
mapped=_map[line]
if mapped==75001:
total_not+=1
if onehot:
binrep=np.zeros(2**binary_dim)
print(line)
binrep[mapped]=1
else:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l]=binrep
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2,binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
if form2:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l,i+1,:]=binrep
else:
rtn[l,i+1]=mapped
if mapped==75001:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
if form2:
rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
else:
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
class Caption_Generator():
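# Overview: this class projects a precomputed image feature vector (a VGG16
# activation when `from_image` is used) into an LSTM, unrolls the LSTM to
# decode a caption one word per step, and scores the decoded steps against
# variational embeddings of the target caption words (see build_model and
# build_generator below).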
def __init__(self, dim_in, dim_embed, dim_hidden, batch_size, n_lstm_steps, n_words, init_b=None,from_image=False,n_input=None,n_lstm_input=None,n_z=None):
self.dim_in = dim_in
self.dim_embed = dim_embed
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.n_words = n_words
self.n_input = n_input
self.n_lstm_input=n_lstm_input
self.n_z=n_z
if from_image:
with open(vgg_path,'rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
self.images = tf.placeholder("float32", [1, 224, 224, 3])
tf.import_graph_def(graph_def, input_map={"images":self.images})
graph = tf.get_default_graph()
self.sess = tf.InteractiveSession(graph=graph)
self.from_image=from_image
# declare the variables to be used for our word embeddings
with tf.device('/cpu:0'):
self.word_embedding = tf.Variable(tf.random_uniform([self.n_words, self.dim_embed], -0.1, 0.1), name='word_embedding')
self.embedding_bias = tf.Variable(tf.zeros([dim_embed]), name='embedding_bias')
# declare the LSTM itself
self.lstm = tf.contrib.rnn.BasicLSTMCell(dim_hidden)
# declare the variables to be used to embed the image feature embedding to the word embedding space
self.img_embedding = tf.Variable(tf.random_uniform([dim_in, dim_hidden], -0.1, 0.1), name='img_embedding')
self.img_embedding_bias = tf.Variable(tf.zeros([dim_hidden]), name='img_embedding_bias')
# declare the variables to go from an LSTM output to a word encoding output
self.word_encoding = tf.Variable(tf.random_uniform([dim_hidden, self.n_lstm_input], -0.1, 0.1), name='word_encoding')
# initialize this bias variable from the preProBuildWordVocab output
# optional initialization setter for encoding bias variable
if init_b is not None:
self.word_encoding_bias = tf.Variable(init_b, name='word_encoding_bias')
else:
self.word_encoding_bias = tf.Variable(tf.zeros([self.n_lstm_input]), name='word_encoding_bias')
self.embw=tf.Variable(xavier_init(self.n_input,self.n_z),name='embw')
self.embb=tf.Variable(tf.zeros([self.n_z]),name='embb')
self.all_encoding_weights=[self.embw,self.embb]
def build_model(self):
# declaring the placeholders for our extracted image feature vectors, our caption, and our mask
# (the mask describes how long our caption is with an array of 0/1 values of length `maxlen`)
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
caption_placeholder = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps, self.n_input])
mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
self.output_placeholder = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
network_weights = self._initialize_weights()
# getting an initial LSTM embedding from our image_imbedding
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
flat_caption_placeholder=tf.reshape(caption_placeholder,[self.batch_size*self.n_lstm_steps,-1])
#leverage one-hot sparsity to lookup embeddings fast
embedded_input,KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],flat_caption_placeholder,logit=True)
KLD_loss=tf.multiply(KLD_loss,tf.reshape(mask,[-1,1]))
KLD_loss=tf.reduce_sum(KLD_loss)*0
with tf.device('/cpu:0'):
word_embeddings=tf.nn.embedding_lookup(self.word_embedding,tf.reshape(self.output_placeholder,[self.batch_size*self.n_lstm_steps,-1]))
word_embeddings+=self.embedding_bias
word_embeddings=tf.reshape(word_embeddings,[self.batch_size,self.n_lstm_steps,-1])
#initialize lstm state
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
rnn_output=[]
with tf.variable_scope("RNN"):
# unroll lstm
for i in range(self.n_lstm_steps):
if i > 0:
# if this isn’t the first iteration of our LSTM we need to get the word_embedding corresponding
# to the (i-1)th word in our caption
current_embedding = word_embeddings[:,i-1,:]
else:
#if this is the first iteration of our LSTM we utilize the embedded image as our input
current_embedding = image_embedding
if i > 0:
# allows us to reuse the LSTM tensor variable on each iteration
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
rnn_output.append(tf.expand_dims(out,1))
#perform classification of output
rnn_output=tf.concat(rnn_output,axis=1)
rnn_output=tf.reshape(rnn_output,[self.batch_size*(self.n_lstm_steps),-1])
encoded_output=tf.matmul(rnn_output,self.word_encoding)+self.word_encoding_bias
#get loss
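# The training objective is a masked negative cosine similarity: the encoded
# LSTM outputs and the variational embeddings of the target words are both
# L2-normalized, their dot product is taken per timestep, and the result is
# averaged over the unmasked positions.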
normed_embedding= tf.nn.l2_normalize(encoded_output, dim=-1)
normed_target=tf.nn.l2_normalize(embedded_input,dim=-1)
cos_sim=tf.multiply(normed_embedding,normed_target)[:,1:]
cos_sim=(tf.reduce_sum(cos_sim,axis=-1))
cos_sim=tf.reshape(cos_sim,[self.batch_size,-1])
cos_sim=tf.reduce_sum(cos_sim[:,1:]*mask[:,1:])
cos_sim=cos_sim/tf.reduce_sum(mask[:,1:])
self.exp_loss=tf.reduce_sum((-cos_sim))
# self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
total_loss = tf.reduce_sum(-(cos_sim))
#average over timeseries length
# total_loss=tf.reduce_sum(masked_xentropy)/tf.reduce_sum(mask[:,1:])
self.print_loss=total_loss
total_loss+=KLD_loss/tf.reduce_sum(mask)
return total_loss, img, caption_placeholder, mask
def build_generator(self, maxlen, batchsize=1,from_image=False):
#same setup as `build_model` function
img = tf.placeholder(tf.float32, [self.batch_size, self.dim_in])
image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(batchsize,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(image_embedding, state)
previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(maxlen):
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(previous_word, state)
# get the maximum probability word and its encoding from the output of the LSTM
logit = tf.matmul(out, self.word_encoding) + self.word_encoding_bias
best_word = tf.argmax(logit, 1)
with tf.device("/cpu:0"):
# get the embedding of the best_word to use as input to the next iteration of our LSTM
previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
previous_word += self.embedding_bias
all_words.append(best_word)
self.img=img
self.all_words=all_words
return img, all_words
def _initialize_weights(self):
all_weights = dict()
trainability=False
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(self.n_z, self.n_lstm_input),name='affine_weight',trainable=trainability),
'affine_bias': tf.Variable(tf.zeros(self.n_lstm_input),name='affine_bias',trainable=trainability)}
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability),
'out_log_sigma': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_log_sigmab',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(self.n_z, self.n_z),name='out_mean',trainable=trainability),
'out_log_sigma': tf.Variable(xavier_init(self.n_z, self.n_z),name='out_log_sigma',trainable=trainability)}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([self.n_z], dtype=tf.float32),name='out_meanb',trainable=trainability)}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(self.n_z, self.n_z),name='out_mean',trainable=trainability)}
# self.no_reload+=all_weights['input_meaning'].values()
# self.var_embs=[]
# if transfertype2:
# self.var_embs=all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
# self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
# if lstm_stack>1:
# self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
# all_weights['LSTM'] = {
# 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
# 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
# 'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
# 'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
# 'lstm': self.lstm}
all_encoding_weights=[all_weights[x].values() for x in all_weights]
for w in all_encoding_weights:
self.all_encoding_weights+=w
return all_weights
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
x=tf.matmul(x,self.embw)+self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.n_input))
all_the_f_one_h.append(tf.one_hot(x,depth=self.n_input))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
# embedding=z
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
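# Reparameterization trick: x is mapped to a mean (and, unless `vanilla` is
# set, a log-variance), and z is sampled as mu + sigma * epsilon with
# epsilon ~ N(0, I). The KL divergence below is the closed form for a
# diagonal Gaussian against a standard normal prior:
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).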
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if not vanilla:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print(logvar.shape, epsilon.shape, std.shape, z.shape, KLD.shape)
return z,KLD
def crop_image(self,x, target_height=227, target_width=227, as_float=True,from_path=True):
#image preprocessing to crop and resize image
image = (x)
if from_path==True:
image=cv2.imread(image)
if as_float:
image = image.astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
def read_image(self,path=None):
# parses image from file path and crops/resizes
if path is None:
path=test_image_path
img = self.crop_image(path, target_height=224, target_width=224)
if img.shape[2] == 4:
img = img[:,:,:3]
img = img[None, ...]
return img
def get_caption(self,x=None):
#gets caption from an image by feeding it through imported VGG16 graph
if self.from_image:
feat = self.read_image(x)
fc7 = self.sess.run(self.sess.graph.get_tensor_by_name("import/Relu_1:0"), feed_dict={self.images:feat})
else:
fc7=np.load(x,'r')
generated_word_index= self.sess.run(self.all_words, feed_dict={self.img:fc7})
generated_word_index = np.hstack(generated_word_index)
generated_words = [ixtoword[x] for x in generated_word_index]
punctuation = np.argmax(np.array(generated_words) == '.')+1
generated_words = generated_words[:punctuation]
generated_sentence = ' '.join(generated_words)
return (generated_sentence)
def get_data(annotation_path, feature_path):
#load training/validation data
annotations = pd.read_table(annotation_path, sep='\t', header=None, names=['image', 'caption'])
return np.load(feature_path,'r'), annotations['caption'].values
def preProBuildWordVocab(sentence_iterator, word_count_threshold=30): # function from Andrej Karpathy's NeuralTalk
#process and vectorize training/validation captions
print('preprocessing word counts and creating vocab based on word count threshold %d' % (word_count_threshold, ))
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent.lower().split(' '):
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print('preprocessed words %d -> %d' % (len(word_counts), len(vocab)))
ixtoword = {}
ixtoword[0] = '.'
wordtoix = {}
wordtoix['#START#'] = 0
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
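# Initialize the decoder bias from the (log of the) empirical word frequencies
# so that, before any training, the model predicts words roughly in proportion
# to how often they occur; this is the initialization trick used in NeuralTalk.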
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector)
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector)
return wordtoix, ixtoword, bias_init_vector.astype(np.float32)
dim_embed = 256
dim_hidden = 256
dim_in = 4096
batch_size = 128
momentum = 0.9
n_epochs = 25
def train(learning_rate=0.001, continue_training=False):
tf.reset_default_graph()
feats, captions = get_data(annotation_path, feature_path)
wordtoix, ixtoword, init_b = preProBuildWordVocab(captions)
np.save('data/ixtoword', ixtoword)
print ('num words:',len(ixtoword))
sess = tf.InteractiveSession()
n_words = len(wordtoix)
maxlen = 30
X, final_captions, mask, _map = load_text(2**19-3,captions)
running_decay=1
decay_rate=0.9999302192204246
with tf.device('/gpu:0'):
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, batch_size, maxlen+2, n_words, np.zeros(n_lstm_input).astype(np.float32),n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
loss, image, sentence, mask = caption_generator.build_model()
saver = tf.train.Saver(max_to_keep=100)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
tf.global_variables_initializer().run()
tf.train.Saver(var_list=caption_generator.all_encoding_weights,max_to_keep=100).restore(sess,tf.train.latest_checkpoint('modelsvardefdefsingle'))
if continue_training:
saver.restore(sess,tf.train.latest_checkpoint(model_path))
losses=[]
for epoch in range(n_epochs):
if epoch==1:
for w in caption_generator.all_encoding_weights:
w.trainable=True
index = (np.arange(len(feats)).astype(int))
np.random.shuffle(index)
index=index[:]
i=0
for start, end in zip( range(0, len(index), batch_size), range(batch_size, len(index), batch_size)):
#format data batch
current_feats = feats[index[start:end]]
current_captions = captions[index[start:end]]
current_caption_ind = [x for x in map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ')[:-1] if word in wordtoix], current_captions)]
current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=maxlen+1)
current_caption_matrix = np.hstack( [np.full( (len(current_caption_matrix),1), 0), current_caption_matrix] )
current_mask_matrix = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
nonzeros = np.array([x for x in map(lambda x: (x != 0).sum()+2, current_caption_matrix )])
current_capts=final_captions[index[start:end]]
for ind, row in enumerate(current_mask_matrix):
row[:nonzeros[ind]] = 1
_, loss_value,total_loss = sess.run([train_op, caption_generator.print_loss,loss], feed_dict={
image: current_feats.astype(np.float32),
caption_generator.output_placeholder : current_caption_matrix.astype(np.int32),
mask : current_mask_matrix.astype(np.float32),
sentence : current_capts.astype(np.float32)
})
print("Current Cost: ", loss_value, "\t Epoch {}/{}".format(epoch, n_epochs), "\t Iter {}/{}".format(start,len(feats)))
losses.append(loss_value*running_decay)
if epoch<9:
if i%3==0:
running_decay*=decay_rate
else:
if i%8==0:
running_decay*=decay_rate
i+=1
print(losses[-1])
print("Saving the model from epoch: ", epoch)
pkl.dump(losses,open('losses/loss_o2e.pkl','wb'))
saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
learning_rate *= 0.95
def test(sess,image,generated_words,ixtoword,idx=0): # Naive greedy search
feats, captions = get_data(annotation_path, feature_path)
feat = np.array([feats[idx]])
saver = tf.train.Saver()
sanity_check= False
# sanity_check=True
if not sanity_check:
saved_path=tf.train.latest_checkpoint(model_path)
saver.restore(sess, saved_path)
else:
tf.global_variables_initializer().run()
generated_word_index= sess.run(generated_words, feed_dict={image:feat})
generated_word_index = np.hstack(generated_word_index)
generated_sentence = [ixtoword[x] for x in generated_word_index]
print(generated_sentence)
if __name__=='__main__':
model_path = './models/tensorflow_o2e'
feature_path = './data/feats.npy'
annotation_path = './data/results_20130124.token'
import sys
feats, captions = get_data(annotation_path, feature_path)
n_input=19
binary_dim=n_input
n_lstm_input=1024
n_z=512
zero_end_tok=True
form2=True
vanilla=True
onehot=False
same_embedding=False
if sys.argv[1]=='train':
train()
elif sys.argv[1]=='test':
ixtoword = np.load('data/ixtoword.npy').tolist()
n_words = len(ixtoword)
maxlen=15
sess = tf.InteractiveSession()
batch_size=1
caption_generator = Caption_Generator(dim_in, dim_hidden, dim_embed, 1, maxlen+2, n_words,n_input=n_input,n_lstm_input=n_lstm_input,n_z=n_z)
image, generated_words = caption_generator.build_generator(maxlen=maxlen)
test(sess,image,generated_words,ixtoword,1) | mit |
huongttlan/statsmodels | statsmodels/datasets/heart/data.py | 25 | 1858 | """Heart Transplant Data, Miller 1976"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """???"""
TITLE = """Transplant Survival Data"""
SOURCE = """ Miller, R. (1976). Least squares regression with censored dara. Biometrica, 63 (3). 449-464.
"""
DESCRSHORT = """Survival times after receiving a heart transplant"""
DESCRLONG = """This data contains the survival time after receiving a heart transplant, the age of the patient and whether or not the survival time was censored.
"""
NOTE = """::
Number of Observations - 69
Number of Variables - 3
Variable name definitions::
death - Days after surgery until death
age - age at the time of surgery
censored - indicates if an observation is censored. 1 is uncensored
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
dset = du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
dset.censors = dset.exog[:,0]
dset.exog = dset.exog[:,1]
return dset
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/heart.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
| bsd-3-clause |
zack3241/incubator-airflow | airflow/hooks/hive_hooks.py | 1 | 29065 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function, unicode_literals
from six.moves import zip
from past.builtins import basestring
import unicodecsv as csv
import itertools
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or conf.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.job.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n"
hql += ";"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
self.log.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition_name: Name of the partition to check for (eg `a=b/c=d`)
:type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get metastore table objects for tables matching the pattern
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get the names of metastore databases matching the pattern
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (java short max val).
For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
table, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
Note that the default authMechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "GSSAPI"}`` (see ``get_conn`` below, which reads
``authMechanism`` and ``kerberos_service_name`` from the connection's extra JSON).
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
self.log.warning(
"Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
self.log.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
self.log.info("Running query: %s", hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
self.log.info("Written %s rows so far.", i)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
mcdaniel67/sympy | sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
| bsd-3-clause |
ldirer/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 14 | 5895 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
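# Illustrative behaviour of the tokenizer above (the input sentence is an
# example chosen here, not part of the dataset): tokens of two or more word
# characters are kept, and any token starting with a digit or underscore
# collapses to the placeholder, e.g.
#   number_aware_tokenizer("Windows 95 shipped on 13 disks")
#   -> ['Windows', '#NUMBER', 'shipped', 'on', '#NUMBER', 'disks']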
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
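# In matrix terms, for a bicluster with row set R and column set C the score is
# (X[~R, C].sum() + X[R, ~C].sum()) / X[R, C].sum(): the weight crossing the
# bicluster boundary divided by the weight inside it. Lower is better, which is
# why np.argsort below keeps the five smallest values.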
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
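# Illustrative behaviour: most_common(defaultdict(int, {'cats': 3, 'dogs': 1}))
# returns [('cats', 3), ('dogs', 1)] -- items sorted by count, descending.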
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
amoshyc/tthl-code | grad_cam.py | 1 | 1546 | import argparse
import random
from pathlib import Path
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors as colors
from scipy.misc import imsave, imread, imresize
from keras.models import load_model, Sequential
from keras import activations
from vis.utils import utils
from vis.visualization import visualize_cam, overlay
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('model', help='model', type=str)
parser.add_argument('image_dir', help='image_dir', type=str)
parser.add_argument('output_dir', help='output_dir', type=str)
parser.add_argument('label', help='label', type=int)
args = parser.parse_args()
model = load_model(args.model)
paths = list(Path(args.image_dir).glob('*.jpg'))
paths = random.sample(paths, k=500)
output_dir = Path(args.output_dir)
(output_dir / '0').mkdir(parents=True, exist_ok=True)
(output_dir / '1').mkdir(parents=True, exist_ok=True)
layer_idx = utils.find_layer_idx(model, 'dense_2')
for path in tqdm(paths, ascii=True):
img = imread(str(path))
img = imresize(img, (224, 224))
pred = np.argmax(model.predict(np.expand_dims(img, axis=0)))
grads = visualize_cam(model, layer_idx, filter_indices=args.label, seed_input=img, backprop_modifier=None)
target_path = output_dir / str(pred) / f'{path.stem}_gcam.jpg'
imsave(str(target_path), overlay(grads, img, 0.4))
# python grad_cam.py .\log\gc_2017-09-24_20-17-12\0.836_08.h5 .\tmp\hl\ .\cam\hl\ 1
# python grad_cam.py .\log\gc_2017-09-24_20-17-12\0.836_08.h5 .\tmp\non\ .\cam\non\ 0 | apache-2.0 |
xiaohan2012/capitalization-restoration-train | single_classifier.py | 1 | 6656 | # Logistic regression is used for this task, instead of sequence model
import codecs
import numpy as np
import pickle
from itertools import (chain, izip)
import pandas as pds
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (classification_report, accuracy_score,
precision_recall_fscore_support,
confusion_matrix)
from sklearn import cross_validation
from sklearn.externals import joblib
from sklearn.grid_search import GridSearchCV
from scipy_util import (load_sparse_csr, save_sparse_csr)
from util import load_crfsuite_format_data
from unigram import UnigramLabeler
from error_display import print_label_error
# turn LOAD_FROM_CACHE OFF (set it to 0) when you want to rebuild the data matrices
LOAD_FROM_CACHE = 1
RETRAIN_MODEL = 1
ERROR_REPORT = 0
# Data preparation
train_path = "/cs/taatto/home/hxiao/capitalization-recovery/result/puls-100k/train.crfsuite.txt"
test_path = "/cs/taatto/home/hxiao/capitalization-recovery/result/puls-100k/test.crfsuite.txt"
if not LOAD_FROM_CACHE:
train_x, train_y = load_crfsuite_format_data(
codecs.open(train_path, 'r', 'utf8'))
test_x, test_y = load_crfsuite_format_data(
codecs.open(test_path, 'r', 'utf8'))
train_x, train_y = (chain.from_iterable(train_x),
chain.from_iterable(train_y))
# Debugging Purpose
# n = 100
# train_x = list(train_x)[:n]
# train_y = list(train_y)[:n]
test_x, test_y = chain.from_iterable(test_x), chain.from_iterable(test_y)
print "hashing the features"
dict_vect = DictVectorizer()
train_x = dict_vect.fit_transform(train_x)
test_x = dict_vect.transform(test_x)
print "encoding the labels"
label_encoder = LabelEncoder()
# import pdb
# pdb.set_trace()
train_y = label_encoder.fit_transform(list(train_y))
test_y = label_encoder.transform(list(test_y))
labels = label_encoder.classes_
for fname, obj in zip(
['cached_data/train_x.npz',
'cached_data/test_x.npz'],
(train_x, test_x)):
save_sparse_csr(fname, obj)
for fname, obj in zip(['cached_data/train_y.npy',
'cached_data/test_y.npy'],
(train_y, test_y)):
np.save(fname, obj)
pickle.dump(labels, open('cached_data/labels.pkl', 'w'))
# Dump DictVectorizer and LabelEncoder
pickle.dump(dict_vect, open('cached_data/dict_vect.pkl', 'w'))
pickle.dump(label_encoder, open('cached_data/label_encoder.pkl', 'w'))
else:
print "loading data"
train_x, test_x \
= map(load_sparse_csr, ('cached_data/train_x.npz',
'cached_data/test_x.npz'))
train_y = np.load('cached_data/train_y.npy')
test_y = np.load('cached_data/test_y.npy')
labels = pickle.load(open('cached_data/labels.pkl', 'r'))
dict_vect = pickle.load(open('cached_data/dict_vect.pkl', 'r'))
label_encoder = pickle.load(open('cached_data/label_encoder.pkl', 'r'))
# print(train_x.shape)
# # print(train_x[0])
if RETRAIN_MODEL:
# Train
train_x, test_x, train_y, test_y = cross_validation.train_test_split(
train_x,
train_y,
test_size=0.1,
random_state=0)
print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)
print "training model"
model = LogisticRegression(penalty='l2',
C=1.0,
verbose=2)
# Uncomment when you want grid search
# param_grid = {'penalty': ['l1', 'l2'],
# 'C': [0.1, 1, 10]}
# model = GridSearchCV(LogisticRegression(verbose=2), param_grid=param_grid,
# verbose=2, n_jobs=6)
model.fit(train_x, train_y)
print model
pred_y = model.predict(test_x)
# Evaluation
print "Evaluation summary:"
print "Subset accuracy: %.2f\n" % \
(accuracy_score(test_y, pred_y) * 100)
# print "Accuracy(Jaccard): %.2f\n" % (jaccard_similarity_score(test_y, pred_y))
# p_ex, r_ex, f_ex, _ = precision_recall_fscore_support(test_y, pred_y,
# average="samples")
print classification_report(test_y, pred_y,
target_names=labels,
digits=4)
p_mac, r_mac, f_mac, _\
= precision_recall_fscore_support(test_y, pred_y,
average="macro")
print "Precision/Recall/F1(macro) : %.4f %.4f %.4f\n" \
% (p_mac, r_mac, f_mac)
p_mic, r_mic, f_mic, _\
= precision_recall_fscore_support(test_y, pred_y,
average="micro")
print "Precision/Recall/F1(micro) : %.4f %.4f %.4f\n" \
% (p_mic, r_mic, f_mic)
joblib.dump(model, 'cached_data/model.pkl')
else:
model = joblib.load('cached_data/model.pkl')
if ERROR_REPORT:
print "Error examples"
test_x_features, test_y = load_crfsuite_format_data(
codecs.open(test_path, 'r', 'utf8'))
labeler = UnigramLabeler(dict_vect, label_encoder, model)
flat_pred_y = labeler.predict(chain.from_iterable(test_x_features))
# unflatten the predicted labels
pred_y = []
current_index = 0
for sent_y in test_y:
pred_y.append(flat_pred_y[current_index: current_index+len(sent_y)])
current_index += len(sent_y)
assert len(pred_y) == len(test_x_features)
sents = [[word['word[0]']
for word in words]
for words in test_x_features]
for words, features,\
true_labels, pred_labels in izip(sents,
test_x_features, test_y, pred_y):
print_label_error(words, features,
true_labels, pred_labels,
target_true_label='IC', target_pred_label='AL',
print_features=True,
model=model,
dict_vect=dict_vect,
label_encoder=label_encoder)
print "Confusion matrix:"
table = pds.DataFrame(confusion_matrix(list(chain.from_iterable(test_y)),
list(chain.from_iterable(pred_y)),
labels=labels),
index=map(lambda s: '{}_true'.format(s), labels),
columns=map(lambda s: '{}_pred'.format(s), labels))
print table
| mit |
rahlk/RAAT | src/tools/misc.py | 2 | 1959 | from pandas import DataFrame, read_csv, concat
from os import walk
import numpy as np
from pdb import set_trace
import sys
def say(text):
sys.stdout.write(str(text))
def shuffle(df, n=1, axis=0):
df = df.copy()
for _ in range(n):
df.apply(np.random.shuffle, axis=axis)
return df
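# Usage sketch (the call below is illustrative, with a hypothetical file name):
# shuffling happens in place on the copied frame through the np.random.shuffle
# side effect, and with axis=0 every column is permuted independently, so
# cross-column row alignment is not preserved.
# shuffled = shuffle(read_csv('some_file.csv'))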
def csv2DF(dir, as_mtx=False, toBin=False):
files=[]
for f in dir:
df=read_csv(f)
headers = [h for h in df.columns if '?' not in h]
# set_trace()
if isinstance(df[df.columns[-1]][0], str):
df[df.columns[-1]] = DataFrame([0 if 'N' in d or 'n' in d else 1 for d in df[df.columns[-1]]])
if toBin:
df[df.columns[-1]]=DataFrame([1 if d > 0 else 0 for d in df[df.columns[-1]]])
files.append(df[headers])
"For N files in a project, use 1 to N-1 as train."
data_DF = concat(files)
if as_mtx: return data_DF.as_matrix()
else: return data_DF
def explore(dir='../Data/Jureczko/', name=None):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
if name:
for k in datasets[1:]:
if name in k:
                if 'Jureczko' in dir or 'mccabe' in dir:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
# set_trace()
training = [train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store' and '.csv' in p]
testing = test
return training, testing
elif 'Seigmund' in dir:
train = [dir+name+'/'+fname[0] for dirPath, _, fname in walk(k)]
return train
else:
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
| mit |
msbeta/apollo | modules/tools/record_analyzer/main.py | 1 | 5886 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import argparse
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.control.proto import control_cmd_pb2
from modules.planning.proto import planning_pb2
from modules.canbus.proto import chassis_pb2
from modules.drivers.proto import pointcloud_pb2
from module_control_analyzer import ControlAnalyzer
from module_planning_analyzer import PlannigAnalyzer
from modules.perception.proto import perception_obstacle_pb2
from modules.prediction.proto import prediction_obstacle_pb2
from lidar_endtoend_analyzer import LidarEndToEndAnalyzer
def process(control_analyzer, planning_analyzer, lidar_endtoend_analyzer,
is_simulation, plot_planning_path, plot_planning_refpath, all_data):
is_auto_drive = False
for msg in reader.read_messages():
if msg.topic == "/apollo/canbus/chassis":
chassis = chassis_pb2.Chassis()
chassis.ParseFromString(msg.message)
if chassis.driving_mode == \
chassis_pb2.Chassis.COMPLETE_AUTO_DRIVE:
is_auto_drive = True
else:
is_auto_drive = False
if msg.topic == "/apollo/control":
if (not is_auto_drive and not all_data) or \
is_simulation or plot_planning_path or plot_planning_refpath:
continue
control_cmd = control_cmd_pb2.ControlCommand()
control_cmd.ParseFromString(msg.message)
control_analyzer.put(control_cmd)
lidar_endtoend_analyzer.put_control(control_cmd)
if msg.topic == "/apollo/planning":
if (not is_auto_drive) and (not all_data):
continue
adc_trajectory = planning_pb2.ADCTrajectory()
adc_trajectory.ParseFromString(msg.message)
planning_analyzer.put(adc_trajectory)
lidar_endtoend_analyzer.put_planning(adc_trajectory)
if plot_planning_path:
planning_analyzer.plot_path(plt, adc_trajectory)
if plot_planning_refpath:
planning_analyzer.plot_refpath(plt, adc_trajectory)
if msg.topic == "/apollo/sensor/velodyne64/compensator/PointCloud2" or \
msg.topic == "/apollo/sensor/lidar128/compensator/PointCloud2":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
point_cloud = pointcloud_pb2.PointCloud()
point_cloud.ParseFromString(msg.message)
lidar_endtoend_analyzer.put_lidar(point_cloud)
if msg.topic == "/apollo/perception/obstacles":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
perception = perception_obstacle_pb2.PerceptionObstacles()
perception.ParseFromString(msg.message)
if msg.topic == "/apollo/prediction":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
prediction = prediction_obstacle_pb2.PredictionObstacles()
prediction.ParseFromString(msg.message)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "usage: python main.py record_file"
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the record file for analysis.")
parser.add_argument(
"-s", "--simulation", action="store_const", const=True,
help="For simulation API call")
parser.add_argument(
"-path", "--planningpath", action="store_const", const=True,
help="plot planing paths in cartesian coordinate.")
parser.add_argument(
"-refpath", "--planningrefpath", action="store_const", const=True,
help="plot planing reference paths in cartesian coordinate.")
parser.add_argument(
"-a", "--alldata", action="store_const", const=True,
help="Analyze all data (both auto and manual), otherwise auto data only without this option.")
args = parser.parse_args()
record_file = args.file
reader = RecordReader(record_file)
control_analyzer = ControlAnalyzer()
planning_analyzer = PlannigAnalyzer(args.simulation)
lidar_endtoend_analyzer = LidarEndToEndAnalyzer()
process(control_analyzer, planning_analyzer,
lidar_endtoend_analyzer, args.simulation, args.planningpath,
args.planningrefpath, args.alldata)
if args.simulation:
planning_analyzer.print_simulation_results()
elif args.planningpath or args.planningrefpath:
plt.axis('equal')
plt.show()
else:
control_analyzer.print_latency_statistics()
planning_analyzer.print_latency_statistics()
lidar_endtoend_analyzer.print_endtoend_latency()
| apache-2.0 |
yala/introdeeplearning | draft/rnn.py | 1 | 3591 | import tensorflow as tf
import cPickle as pickle
from collections import defaultdict
import re, random
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
#Read data and do preprocessing
def read_data(fn):
with open(fn) as f:
data = pickle.load(f)
#Clean the text
new_data = []
pattern = re.compile('[\W_]+')
for text,label in data:
text = text.strip("\r\n ").split()
x = []
for word in text:
word = pattern.sub('', word)
word = word.lower()
if 0 < len(word) < 20:
x.append(word)
new_data.append((' '.join(x),label))
return new_data
train = read_data("data/train.p")
print train[0:10]
train_x, train_y = zip(*train)
vectorizer = CountVectorizer(min_df=0.001)  # the corpus is passed to fit() below, not to the constructor
vectorizer.fit(train_x)
vocab = vectorizer.vocabulary_
UNK_ID = len(vocab)
PAD_ID = len(vocab) + 1
word2id = lambda w:vocab[w] if w in vocab else UNK_ID
train_x = [[word2id(w) for w in x.split()] for x in train_x]
train_data = zip(train_x, train_y)
import math
#build RNN model
batch_size = 20
hidden_size = 100
vocab_size = len(vocab) + 2
def lookup_table(input_, vocab_size, output_size, name):
with tf.variable_scope(name):
embedding = tf.get_variable("embedding", [vocab_size, output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / math.sqrt(output_size)))
return tf.nn.embedding_lookup(embedding, input_)
def linear(input_, output_size, name, init_bias=0.0):
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
W = tf.get_variable("Matrix", [shape[-1], output_size], tf.float32, tf.random_normal_initializer(stddev=1.0 / math.sqrt(shape[-1])))
if init_bias is None:
return tf.matmul(input_, W)
with tf.variable_scope(name):
b = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(init_bias))
return tf.matmul(input_, W) + b
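# Both helpers above are thin wrappers: lookup_table is an embedding lookup,
# and linear computes y = x.W (+ b), a plain fully connected layer with W
# initialised from a normal distribution of stddev 1/sqrt(fan_in).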
session = tf.Session()
tweets = tf.placeholder(tf.int32, [batch_size, None])
labels = tf.placeholder(tf.float32, [batch_size])
embedding = lookup_table(tweets, vocab_size, hidden_size, name="word_embedding")
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
init_state = lstm_cell.zero_state(batch_size, tf.float32)
_, final_state = tf.nn.dynamic_rnn(lstm_cell, embedding, initial_state=init_state)
sentiment = linear(final_state[1], 1, name="output")
sentiment = tf.squeeze(sentiment, [1])
loss = tf.nn.sigmoid_cross_entropy_with_logits(sentiment, labels)
loss = tf.reduce_mean(loss)
prediction = tf.to_float(tf.greater_equal(sentiment, 0.5))
pred_err = tf.to_float(tf.not_equal(prediction, labels))
pred_err = tf.reduce_sum(pred_err)
optimizer = tf.train.AdamOptimizer().minimize(loss)
tf.global_variables_initializer().run(session=session)
saver = tf.train.Saver()
random.shuffle(train_data)
err_rate = 0.0
for step in xrange(0, len(train_data), batch_size):
batch = train_data[step:step+batch_size]
batch_x, batch_y = zip(*batch)
batch_x = list(batch_x)
if len(batch_x) != batch_size:
continue
max_len = max([len(x) for x in batch_x])
for i in xrange(batch_size):
len_x = len(batch_x[i])
batch_x[i] = [PAD_ID] * (max_len - len_x) + batch_x[i]
batch_x = np.array(batch_x, dtype=np.int32)
batch_y = np.array(batch_y, dtype=np.float32)
feed_map = {tweets:batch_x, labels:batch_y}
_, batch_err = session.run([optimizer, pred_err], feed_dict=feed_map)
err_rate += batch_err
if step % 100 == 0 and step > 0:
print err_rate / step
| mit |
hwangjt/SMT | setup.py | 1 | 2022 | '''
Author: Dr. John T. Hwang <[email protected]>
Dr. Mohamed A. Bouhlel <mbouhlel@umich>
This package is distributed under New BSD license.
'''
from setuptools import setup, Extension
import os
import sys
from subprocess import call
import numpy as np
try:
import Cython
except ImportError:
import pip
pip.main(['install', 'Cython'])
from Cython.Build import cythonize
extra_compile_args=[]
if not sys.platform.startswith('win'):
extra_compile_args.append('-std=c++11')
ext = cythonize(
Extension("smt.surrogate_models.rbfclib",
sources=[
'smt/src/rbf/rbf.cpp',
'smt/src/rbf/rbfclib.pyx',
],
language="c++", extra_compile_args=extra_compile_args,
include_dirs=[np.get_include(),
])) + cythonize(
Extension("smt.surrogate_models.idwclib",
sources=[
'smt/src/idw/idw.cpp',
'smt/src/idw/idwclib.pyx',
],
language="c++", extra_compile_args=extra_compile_args,
include_dirs=[np.get_include(),
])) + cythonize(
Extension("smt.surrogate_models.rmtsclib",
sources=[
'smt/src/rmts/rmtsclib.pyx',
'smt/src/rmts/utils.cpp',
'smt/src/rmts/rmts.cpp',
'smt/src/rmts/rmtb.cpp',
'smt/src/rmts/rmtc.cpp',
],
language="c++", extra_compile_args=extra_compile_args,
include_dirs=[np.get_include(),
]))
setup(name='smt',
version='0.1',
description='The Surrogate Modeling Toolbox (SMT)',
author='Mohamed Amine Bouhlel',
author_email='[email protected]',
license='BSD-3',
packages=[
'smt',
'smt/surrogate_models',
'smt/problems',
'smt/sampling_methods',
'smt/utils',
],
install_requires=[
'scikit-learn',
'pyDOE',
'matplotlib',
'numpydoc',
'six>=1.10',
'scipy'
],
zip_safe=False,
ext_modules=ext,
url = 'https://github.com/SMTorg/smt', # use the URL to the github repo
download_url = 'https://github.com/SMTorg/smt/archive/v0.1.tar.gz',
)
| bsd-3-clause |
colincsl/pyKinectTools | pyKinectTools/scripts/PoseInitAndTracking.py | 1 | 17374 | """
Main file for training multi-camera pose
"""
#import os
#import time
import itertools as it
from joblib import Parallel, delayed
import cPickle as pickle
import optparse
from copy import deepcopy
import numpy as np
import scipy.misc as sm
import scipy.ndimage as nd
import Image
import cv2
import skimage
from skimage import color
from skimage.draw import line, circle
from skimage.color import rgb2gray,gray2rgb, rgb2lab
from skimage.feature import local_binary_pattern, match_template, peak_local_max
from pyKinectTools.dataset_readers.KinectPlayer import KinectPlayer, display_help
from pyKinectTools.utils.DepthUtils import *
from pyKinectTools.utils.SkeletonUtils import display_skeletons, transform_skels, kinect_to_msr_skel, msr_to_kinect_skel
from pyKinectTools.dataset_readers.MHADPlayer import MHADPlayer
from pyKinectTools.algs.GeodesicSkeleton import *
from pyKinectTools.algs.PoseTracking import *
from pyKinectTools.algs.LocalOccupancyPattern import *
from pyKinectTools.algs.IterativeClosestPoint import IterativeClosestPoint
from sklearn.linear_model import SGDClassifier
from sklearn.kernel_approximation import AdditiveChi2Sampler
from IPython import embed
np.seterr(all='ignore')
# -------------------------MAIN------------------------------------------
def main(visualize=False, learn=False, actions=None, subjects=None, n_frames=220):
# learn = True
# learn = False
    # argparse defaults give empty lists; 'is []' never matches, so test for emptiness
    if not actions:
        actions = [2]
    if not subjects:
        subjects = [2]
# actions = [1]
# actions = [1, 2, 3, 4, 5]
# subjects = [1]
if 1:
MHAD = True
cam = MHADPlayer(base_dir='/Users/colin/Data/BerkeleyMHAD/', kinect=1, actions=actions, subjects=subjects, reps=[1], get_depth=True, get_color=True, get_skeleton=True, fill_images=False)
else:
MHAD = False
cam = KinectPlayer(base_dir='./', device=1, bg_subtraction=True, get_depth=True, get_color=True, get_skeleton=True, fill_images=False)
bg = Image.open('/Users/colin/Data/JHU_RGBD_Pose/CIRL_Background_A.tif')
# cam = KinectPlayer(base_dir='./', device=2, bg_subtraction=True, get_depth=True, get_color=True, get_skeleton=True, fill_images=False)
# bg = Image.open('/Users/colin/Data/JHU_RGBD_Pose/CIRL_Background_B.tif')
cam.bgSubtraction.backgroundModel = np.array(bg.getdata()).reshape([240,320]).clip(0, 4500)
height, width = cam.depthIm.shape
skel_previous = None
face_detector = FaceDetector()
hand_detector = HandDetector(cam.depthIm.shape)
# curve_detector = CurveDetector(cam.depthIm.shape)
# Video writer
# video_writer = cv2.VideoWriter("/Users/colin/Desktop/test.avi", cv2.cv.CV_FOURCC('M','J','P','G'), 15, (320,240))
# Save Background model
# im = Image.fromarray(cam.depthIm.astype(np.int32), 'I')
# im.save("/Users/Colin/Desktop/k2.png")
# Setup pose database
append = True
append = False
# pose_database = PoseDatabase("PoseDatabase.pkl", learn=learn, search_joints=[0,4,7,10,13], append=append)
pose_database = PoseDatabase("PoseDatabase.pkl", learn=learn, search_joints=[0,2,4,5,7,10,13], append=append)
# Setup Tracking
skel_init, joint_size, constraint_links, features_joints,skel_parts, convert_to_kinect = get_14_joint_properties()
constraint_values = []
for c in constraint_links:
constraint_values += [np.linalg.norm(skel_init[c[0]]-skel_init[c[1]], 2)]
constraint_values = np.array(constraint_values)
skel_current = None#skel_init.copy()
skel_previous = None#skel_current.copy()
# Evaluation
accuracy_all_db = []
accuracy_all_track = []
joint_accuracy_db = []
joint_accuracy_track = []
# geo_accuracy = []
# color_accuracy = []
# lbp_accuracy = []
frame_count = 0
frame_rate = 1
if not MHAD:
cam.next(350)
frame_prev = 0
# try:
if 1:
while cam.next(frame_rate):# and frame_count < n_frames:
if frame_count - frame_prev > 100:
print ""
print "Frame #{0:d}".format(frame_count)
frame_prev = frame_count
if not MHAD:
if len(cam.users) == 0:
continue
else:
# cam.users = [np.array(cam.users[0]['jointPositions'].values())]
if np.any(cam.users[0][0] == -1):
continue
cam.users[0][:,1] *= -1
cam.users_uv_msr = [cam.camera_model.world2im(cam.users[0], [240,320])]
# Apply mask to image
if MHAD:
mask = cam.get_person(2) > 0
else:
mask = cam.get_person() > 0
if np.all(mask==False):
continue
im_depth = cam.depthIm
cam.depthIm[cam.depthIm>3000] = 0
im_color = cam.colorIm*mask[:,:,None]
cam.colorIm *= mask[:,:,None]
pose_truth = cam.users[0]
pose_truth_uv = cam.users_uv_msr[0]
# Get bounding box around person
box = nd.find_objects(mask)[0]
d = 20
# Widen box
box = (slice(np.maximum(box[0].start-d, 0), \
np.minimum(box[0].stop+d, height-1)), \
slice(np.maximum(box[1].start-d, 0), \
np.minimum(box[1].stop+d, width-1)))
box_corner = [box[0].start,box[1].start]
''' ---------- ----------------------------------- --------'''
''' ----------- Feature Detector centric approach ---------'''
''' ---------- ----------------------------------- --------'''
''' ---- Calculate Detectors ---- '''
# Face detection
face_detector.run(im_color[box])
# Skin detection
hand_markers = hand_detector.run(im_color[box], n_peaks=3)
# Calculate Geodesic Extrema
im_pos = cam.camera_model.im2PosIm(cam.depthIm*mask)[box] * mask[box][:,:,None]
geodesic_markers = geodesic_extrema_MPI(im_pos, iterations=5, visualize=False)
_, geo_map = geodesic_extrema_MPI(im_pos, iterations=1, visualize=True)
geodesic_markers_pos = im_pos[geodesic_markers[:,0], geodesic_markers[:,1]]
markers = list(geodesic_markers) + list(hand_markers) #+ list(lop_markers) + curve_markers
markers = np.array([list(x) for x in markers])
''' ---- Database lookup ---- '''
if 1:
pts_mean = im_pos[(im_pos!=0)[:,:,2]].mean(0)
if learn:
# Normalize pose
pose_uv = cam.users_uv[0]
if np.any(pose_uv==0):
print "skip"
frame_count += frame_rate
continue
pose_database.update(pose_truth - pts_mean)
else:
# Concatenate markers
markers = list(geodesic_markers) + hand_markers
markers = np.array([list(x) for x in markers])
# Normalize pose
pts = im_pos[markers[:,0], markers[:,1]]
pts = np.array([x for x in pts if x[0] != 0])
pts -= pts_mean
# Get closest pose
pose = pose_database.query(pts, knn=5)
# embed()
for i in range(5):
pose_tmp = cam.camera_model.world2im(pose[i]+pts_mean, cam.depthIm.shape)
cam.colorIm = display_skeletons(cam.colorIm, pose_tmp, skel_type='Kinect', color=(0,i*40+50,0))
pose = pose[0]
# im_pos -= pts_mean
# R,t = IterativeClosestPoint(pose, im_pos.reshape([-1,3])-pts_mean, max_iters=5, min_change=.001, pt_tolerance=10000)
# pose = np.dot(R.T, pose.T).T - t
# pose = np.dot(R, pose.T).T + t
pose += pts_mean
pose_uv = cam.camera_model.world2im(pose, cam.depthIm.shape)
# print pose
surface_map = nd.distance_transform_edt(-nd.binary_erosion(mask[box]), return_distances=False, return_indices=True)
try:
pose_uv[:,:2] = surface_map[:, pose_uv[:,0]-box_corner[0], pose_uv[:,1]-box_corner[1]].T + [box_corner[0], box_corner[1]]
except:
pass
pose = cam.camera_model.im2world(pose_uv, cam.depthIm.shape)
# print pose
''' ---- Tracker ---- '''
# surface_map = nd.distance_transform_edt(-mask[box], return_distances=False, return_indices=True)
# surface_map = nd.distance_transform_edt(im_pos[:,:,2]==0, return_distances=False, return_indices=True)
if skel_previous is None:
# if 1:
skel_previous = pose.copy()
skel_current = pose.copy()
skel_previous_uv = pose_uv.copy()
skel_current_uv = pose_uv.copy()
for _ in range(1):
# ---- (Step 1A) Find feature coordespondences ----
try:
skel_previous_uv[:,:2] = surface_map[:, skel_previous_uv[:,0]-box_corner[0], skel_previous_uv[:,1]-box_corner[1]].T + [box_corner[0], box_corner[1]]
except:
pass
skel_current = cam.camera_model.im2world(skel_previous_uv, cam.depthIm.shape)
# Alternative method: use kdtree
## Calc euclidian distance between each pixel and all joints
px_corr = np.zeros([im_pos.shape[0], im_pos.shape[1], len(skel_current)])
# for i,s in enumerate(pose):
# for i,s in enumerate(skel_current):
# px_corr[:,:,i] = np.sqrt(np.sum((im_pos - s)**2, -1))# / joint_size[i]**2
# for i,s in enumerate(pose_uv):
# Geodesics
for i,s in enumerate(skel_previous_uv):
''' Problem: need to constrain pose_uv to mask '''
_, geo_map = geodesic_extrema_MPI(im_pos, [s[0]-box_corner[0],s[1]-box_corner[1]], iterations=1, visualize=True)
px_corr[:,:,i] = geo_map
subplot(2,7,i+1)
# imshow(geo_map, vmin=0, vmax=2000)
# axis('off')
px_corr[geo_map==0,i] = 9999
cv2.imshow('gMap', (px_corr.argmin(-1)+1)/15.*mask[box])
## Handle occlusions by argmax'ing over set of skel parts
# visible_configurations = list(it.product([0,1], repeat=5))[1:]
visible_configurations = [
# [0,1,1,1,1],
# [1,0,0,0,0],
[1,1,1,1,1]
]
px_visibility_label = np.zeros([im_pos.shape[0], im_pos.shape[1], len(visible_configurations)], dtype=np.uint8)
visible_scores = np.ones(len(visible_configurations))*np.inf
# Try each occlusion configuration set
for i,v in enumerate(visible_configurations):
visible_joints = list(it.chain.from_iterable(skel_parts[np.array(v)>0]))
px_visibility_label[:,:,i] = np.argmin(px_corr[:,:,visible_joints], -1)#.reshape([im_pos.shape[0], im_pos.shape[1]])
visible_scores[i] = np.min(px_corr[:,:,visible_joints], -1).sum()
# Choose best occlusion configuration
occlusion_index = np.argmin(visible_scores)
occlusion_configuration = visible_configurations[occlusion_index]
occlusion_set = list(it.chain.from_iterable(skel_parts[np.array(visible_configurations[occlusion_index])>0]))
# Choose label for pixels based on occlusion configuration
px_label = px_visibility_label[:,:,occlusion_index]*mask[box]
px_label_flat = px_visibility_label[:,:,occlusion_index][mask[box]].flatten()
visible_joints = [1 if x in occlusion_set else 0 for x in range(len(pose))]
# print visible_joints
# Project distance to joint's radius
px_joint_displacement = im_pos[mask[box]] - skel_current[px_label_flat]
px_joint_magnitude = np.sqrt(np.sum(px_joint_displacement**2,-1))
joint_mesh_pos = skel_current[px_label_flat] + px_joint_displacement*(joint_size[px_label_flat]/px_joint_magnitude)[:,None]
px_joint_displacement = joint_mesh_pos - im_pos[mask[box]]
# Ensure pts aren't too far away
px_joint_displacement[np.abs(px_joint_displacement) > 500] = 0
# embed()
if 0:
x = im_pos.copy()*0
x[mask[box]] = joint_mesh_pos
for i in range(3):
subplot(1,4,i+1)
imshow(x[:,:,i])
axis('off')
subplot(1,4,4)
imshow((px_label+1)*mask[box])
# Calc the correspondance change in position for each joint
correspondence_displacement = np.zeros([len(skel_current), 3])
ii = 0
for i,_ in enumerate(skel_current):
if i in occlusion_set:
labels = px_label_flat==i
correspondence_displacement[i] = np.sum(px_joint_displacement[px_label_flat==ii], 0) / np.sum(px_joint_displacement[px_label_flat==ii]!=0)
ii+=1
correspondence_displacement = np.nan_to_num(correspondence_displacement)
# print correspondence_displacement
# Viz correspondences
if 0:
x = im_pos.copy()*0
x[mask[box]] = px_joint_displacement
for i in range(3):
subplot(1,4,i+1)
imshow(x[:,:,i])
axis('off')
subplot(1,4,4)
imshow((px_label+1)*mask[box])
# embed()
# for j in range(3):
# for i in range(14):
# subplot(3,14,j*14+i+1)
# imshow(x[:,:,j]*((px_label==i)*mask[box]))
# axis('off')
show()
# ---- (Step 2) Update pose state, x ----
lambda_p = .0
lambda_c = 1.
skel_prev_difference = (skel_current - skel_previous)
# print skel_prev_difference
skel_current = skel_previous \
+ lambda_p * skel_prev_difference \
- lambda_c * correspondence_displacement#\
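                # Update rule: x_new = x_prev + lambda_p * (x_cur - x_prev) - lambda_c * d_corr;
                # since lambda_p = 0 above, only the feature-correspondence displacement moves the joints.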
# ---- (Step 3) Add constraints ----
if 1:
# A: Link lengths / geometry
# skel_current = link_length_constraints(skel_current, constraint_links, constraint_values, alpha=.5)
skel_current = geometry_constraints(skel_current, joint_size, alpha=0.5)
skel_current = collision_constraints(skel_current, constraint_links)
skel_img_box = (cam.camera_model.world2im(skel_current, cam.depthIm.shape) - [box[0].start, box[1].start, 0])#/mask_interval
skel_img_box = skel_img_box.clip([0,0,0], [box[0].stop-box[0].start-1, box[1].stop-box[1].start-1, 9999])
# skel_img_box = skel_img_box.clip([0,0,0], [cam.depthIm.shape[0]-1, cam.depthIm.shape[1]-1, 9999])
# B: Ray-cast constraints
# embed()
skel_current, skel_current_uv = ray_cast_constraints(skel_current, skel_img_box, im_pos, surface_map, joint_size)
# skel_img_box -= [box[0].start, box[1].start, 0]
# # Map back from mask to image
# try:
# skel_current_uv[:,:2] = surface_map[:, skel_img_box[:,0], skel_img_box[:,1]].T# + [box_corner[0], box_corner[1]]
# except:
# pass
prob = link_length_probability(skel_current, constraint_links, constraint_values, 100)
# print "Prob:", np.mean(prob), np.min(prob), prob
print frame_count
thresh = .05
if np.min(prob) < thresh:# and frame_count > 1:
print 'Resetting pose'
for c in constraint_links[prob<thresh]:
for cc in c:
skel_current_uv[c] = pose_uv[c] - [box[0].start, box[1].start, 0]
skel_current[c] = pose[c]
# skel_current_uv = pose_uv.copy() - [box[0].start, box[1].start, 0]
# skel_current = pose.copy()
skel_current_uv = skel_current_uv + [box[0].start, box[1].start, 0]
skel_current = cam.camera_model.im2world(skel_current_uv, cam.depthIm.shape)
else:
skel_current_uv = (cam.camera_model.world2im(skel_current, cam.depthIm.shape))
# skel_img_box = skel_img_box.clip([0,0,0], [cam.depthIm.shape[0]-1, cam.depthIm.shape[1]-1, 9999])
# Update for next round
skel_previous = skel_current.copy()
skel_previous_uv = skel_current_uv.copy()
''' ---- Accuracy ---- '''
# embed()
if 1 and not learn:
# pose_truth = cam.users[0]
error_db = pose_truth - pose
error_track = pose_truth - skel_current
# print "Error", error
error_l2_db = np.sqrt(np.sum(error_db**2, 1))
error_l2_track = np.sqrt(np.sum(error_track**2, 1))
joint_accuracy_db += [error_l2_db]
joint_accuracy_track += [error_l2_track]
accuracy_db = np.sum(error_l2_db < 150) / 14.
accuracy_track = np.sum(error_l2_track < 150) / 14.
print "Current db:", accuracy_db, error_l2_db.mean()
print "Current track:", accuracy_track, error_l2_track.mean()
print ""
accuracy_all_db += [accuracy_db]
accuracy_all_track += [accuracy_track]
# print "Running avg:", np.mean(accuracy_all)
# print "Joint avg (per-joint):", np.mean(joint_accuracy_all, -1)
# print "Joint avg (overall):", np.mean(joint_accuracy_all)
''' --- Visualization --- '''
display_markers(cam.colorIm, hand_markers[:2], box, color=(0,250,0))
if len(hand_markers) > 2:
display_markers(cam.colorIm, [hand_markers[2]], box, color=(0,200,0))
display_markers(cam.colorIm, geodesic_markers, box, color=(200,0,0))
# display_markers(cam.colorIm, curve_markers, box, color=(0,100,100))
# display_markers(cam.colorIm, lop_markers, box, color=(0,0,200))
cam.colorIm = display_skeletons(cam.colorIm, pose_truth_uv, skel_type='Kinect', color=(0,255,0))
cam.colorIm = display_skeletons(cam.colorIm, pose_uv, skel_type='Kinect')
cam.colorIm = display_skeletons(cam.colorIm, skel_current_uv, skel_type='Kinect', color=(0,0,255))
# cam.visualize(color=True, depth=False)
cam.visualize(color=True, depth=True)
# embed()
# ------------------------------------------------------------
# video_writer.write((geo_clf_map/float(geo_clf_map.max())*255.).astype(np.uint8))
# video_writer.write(cam.colorIm[:,:,[2,1,0]])
frame_count += frame_rate
# except:
# pass
print "-- Results for subject {:d} action {:d}".format(subjects[0],actions[0])
print "Running avg (db):", np.mean(accuracy_all_db)
print "Running avg (track):", np.mean(accuracy_all_track)
print "Joint avg (overall db):", np.mean(joint_accuracy_db)
print "Joint avg (overall track):", np.mean(joint_accuracy_track)
# print 'Done'
embed()
return
if __name__=="__main__":
parser = optparse.OptionParser()
parser.add_option('-v', '--visualize', dest='viz', action="store_true", default=False, help='Enable visualization')
parser.add_option('-l', '--learn', dest='learn', action="store_true", default=False, help='Training phase')
parser.add_option('-a', '--actions', dest='actions', type='int', action='append', default=[], help='Training phase')
parser.add_option('-s', '--subjects', dest='subjects', type='int', action='append', default=[], help='Training phase')
(opt, args) = parser.parse_args()
main(visualize=opt.viz, learn=opt.learn, actions=opt.actions, subjects=opt.subjects)
| bsd-2-clause |
mehdidc/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression essentially minimizes a penalised version of the
least-squares objective. The penalty `shrinks` the values of the
regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
PabloPiaggi/plumed2 | user-doc/tutorials/others/ves-lugano2017-kinetics/TRAJECTORIES-1700K/cdf-analysis.py | 6 | 1134 | #!/usr/bin/env python
import numpy as np
from scipy.stats import ks_2samp
from scipy.optimize import curve_fit
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
f=open('fpt.dat','r')
# define theoretical CDF
def func(x,tau):
return 1-np.exp(-x/tau)
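# func is the CDF of an exponential distribution with mean tau,
# F(t) = 1 - exp(-t/tau). Since the first-passage times are rescaled by their
# mean below, the fitted tau is dimensionless and the physical time scale is
# recovered as tau*mu.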
x = []
count=0
for line in f:
line=line.strip()
columns=line.split()
x.append(float(columns[0]))
count=count+1
x = np.array(x)
# for numerical stability we divide by the mean
mu=x.mean()
x=x/mu
# now compute emirical CDF
ecdf = ECDF(x)
# plot ECDF
x1 = np.linspace(min(x), max(x))
y1 = ecdf(x1)
plt.step(x1*mu, y1,'k-',lw=3.)
# fit to theoretical CDF to obtain tau
popt,pcov = curve_fit(func,x1,y1)
tau=popt[0]
print 'mean of data',mu
print 'best fit tau',tau*mu
yfit=func(x1,tau)
# plot fit
plt.plot(x1*mu,yfit,'b-',lw=3.)
# for p-value
# now generate some random data with the same exponential distribution
np.random.seed(12345678);
x2 = np.random.exponential(tau,1000)  # the scale argument of np.random.exponential is the mean, i.e. tau
st,p = ks_2samp(x2,x)
print 'p-value',p
plt.xscale('log')
plt.xlabel('time [s]')
plt.ylabel('Cumulative Probability')
plt.show()
| lgpl-3.0 |
parklab/PaSDqc | setup.py | 1 | 1758 | # from distutils.core import setup
from setuptools import setup
def check_dependencies():
install_requires = []
# Assuming the python standard library is installed...
try:
import pathlib
except ImportError:
install_requires.append('pathlib')
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import seaborn
except ImportError:
install_requires.append('seaborn')
try:
import pandas
except ImportError:
install_requires.append('pandas')
try:
import statsmodels
except ImportError:
install_requires.append('statsmodels')
try:
import astropy
except ImportError:
install_requires.append('astropy')
try:
import plotly
except ImportError:
install_requires.append('plotly')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(
name = 'PaSDqc',
description = "Quality control for single cell whole genome sequencing",
version = '1.1.0',
packages = ['PaSDqc'],
scripts = ['scripts/PaSDqc'],
install_requires = install_requires,
author = "Maxwell A. Sherman",
author_email = "[email protected]",
url = "https://github.com/parklab/PaSDqc",
license = 'MIT',
include_package_data = True,
package_data = {
'PaSDqc': ['db/*'],
}
# include_package_data=True,
)
| mit |
RayMick/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
thp44/delphin_6_automation | delphin_6_automation/sampling/archive/sampling.py | 1 | 4460 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 16:14:23 2017
@author: Astrid
"""
import os
import scipy.io as sio
import numpy as np
import pandas as pd
import sys
from scipy.stats import norm
from scipy.stats import randint
from scipy.stats import uniform
from delphin_6_automation.sampling import sobol_lib
def sobol(m, dim, sets=1):
# WP6 - What is m?
# WP6 - What is dim - dimensions?
# WP6 - sets of what?
design = np.empty([0, dim])
for i in range(sets):
d = sobol_lib.scrambled_sobol_generate(k=dim, N=m, skip=2, leap=0)
design = np.vstack((design, d))
return design
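# What sobol() returns, in short: a (sets * m, dim) array of quasi-random
# points in [0, 1)^dim, built by stacking `sets` scrambled Sobol designs of m
# points each; main() below maps the columns through inverse CDFs (ppf calls)
# to obtain the actual parameter samples.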
def main(scenario, dist, runs, sets, strat, path, seq=None):
# WP6 - scenario - Is it a dataframe?
# WP6 - dist - distribution?
# WP6 - runs - number of runs
# WP6 - sets - sets of samplings?
# WP6 - strat - strategy?
# WP6 - path - path to sampling scheme?
if not os.path.exists(path):
os.mkdir(path)
# WP6 - Load an existing Matlab file?
# WP6 - Is that needed for WP6?
if strat == 'load':
# load file
file = os.path.join(path, 'samples_raw.mat')
samples_mat = sio.loadmat(file)['design']
samples_raw = np.empty((0, dist.shape[0]))
for i in range(samples_mat.shape[1]):
samples_raw = np.vstack((samples_raw, samples_mat[:, i].tolist()[0]))
else:
# WP6 - What is the difference between the two? Wouldn't WP6 always want sobol convergence?
# create raw sampling scheme
if strat == 'sobol':
samples_raw = sobol(m=runs, dim=dist.shape[0], sets=sets)
elif strat == 'sobol convergence':
try:
samples_raw = np.load(os.path.join(path, 'samples_raw_' + str(seq) + '.npy'))
samples = pd.read_pickle(os.path.join(path, 'samples_' + str(seq)))
except FileNotFoundError:
samples_raw = sobol(m=2 ** 12, dim=dist.shape[0], sets=1)
np.save(os.path.join(path, 'samples_raw_' + str(seq)), samples_raw)
samples_raw = samples_raw[sets:sets + runs, :]
else:
print("Error: This sampling strategy is not supperted. Currently only 'sobol' and 'load' are implemented.")
try:
# WP6 - What is the purpose?
samples = samples
except NameError:
# WP6 - We are moving data from one dict to another?
samples = pd.DataFrame({})
samples[scenario['parameter']] = []
for p in dist['parameter']:
samples[p] = []
# Add samples to dictionary
for s in scenario['value']:
# WP6 - What is SDF?
sdf = pd.DataFrame({})
sdf[scenario['parameter']] = [s] * samples_raw.shape[0]
for i in range(dist.shape[0]):
dist_type = dist.at[i, 'type']
x = samples_raw[:, i]
if dist_type == 'discrete':
# WP6 - What is pl?
p1 = dist.at[i, 'value']
if isinstance(p1, int):
high = p1 + 1
values = randint.ppf(x, low=1, high=high).tolist()
else:
high = len(p1)
values = randint.ppf(x, low=0, high=high).tolist()
# WP6 - What is the point of this loop? Data copying?
values = [p1[int(x)] for x in values]
sdf[dist.at[i, 'parameter']] = values
elif dist_type == 'uniform':
p1 = dist.at[i, 'value'][0]
p2 = dist.at[i, 'value'][1]
values = uniform.ppf(x, loc=p1, scale=p2 - p1).tolist()
sdf[dist.at[i, 'parameter']] = values
elif dist_type == 'normal':
p1 = dist.at[i, 'value'][0]
p2 = dist.at[i, 'value'][1]
values = norm.ppf(x, loc=p1, scale=p2).tolist()
sdf[dist.at[i, 'parameter']] = values
else:
print('ERROR: distribution type not supported')
sys.exit()
samples = samples.append(sdf, ignore_index=True)
# Save samples
# WP6 - Didn't we already load the file? Why are we checking if the name exists?
if seq == None:
name = 'samples'
else:
name = 'samples_' + str(seq)
# WP6 - Why save it twice?
samples.to_excel(os.path.join(path, name + '.xlsx'))
samples.to_pickle(os.path.join(path, name))
return samples
| mit |
TIm097/Bachelorarbeit | Hubb_Strom.py | 1 | 4021 | # Stromoperator Erwartungswert im Zeitmittel
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
# States of the 36-dimensional Hilbert space:
Z = np.genfromtxt('Stationäre_Systeme/Hubb_Ham/Hubb_Zust.txt', unpack = 'True')
# Positions:
u = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/U0_E01_v0.txt', unpack = 'True').T # solution of the Schrödinger equation, real and imaginary parts stacked
N = np.round(np.shape(u)[1]/2).astype(int) # number of iteration steps per solution
W = np.round(np.shape(u)[0]/36).astype(int)
print(W)
x = u[:,0:N] + 1j*u[:,N:N*2]
u1 = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/U0_E01_v1.txt', unpack = 'True').T # solution of the Schrödinger equation, real and imaginary parts stacked
x1 = u1[:,0:N] + 1j*u1[:,N:N*2]
u2 = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/U0_E01_v2.txt', unpack = 'True').T # solution of the Schrödinger equation, real and imaginary parts stacked
x2 = u2[:,0:N] + 1j*u2[:,N:N*2]
u3 = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/U0_E01_v3.txt', unpack = 'True').T # solution of the Schrödinger equation, real and imaginary parts stacked
x3 = u3[:,0:N] + 1j*u3[:,N:N*2]
#u_lambda = np.genfromtxt('Hubb_Ham_Zeit_txt/Hubb_Ham_Zeit_Lsg_lambda.txt', unpack = 'True').T # solution of the Schrödinger equation with diagonal hopping, real and imaginary parts stacked
#x_lambda = u_lambda[:,0:N] + 1j*u_lambda[:,N:N*2]
# Time:
t = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/Linspace.txt', unpack = 'True')
t0 = t[0]
t1 = t[np.shape(t)[0]-1]
# Current operator matrix:
J = np.genfromtxt('Hubb_Ham_Zeit_Lösungen/Hubb_Strom_Matrix.txt', unpack = 'True').T
J = 1j*J
# Expectation value of the current operator as a function of time t:
def StromErwartungswert(z):
global J
EJ = np.zeros((N,1))*1j
for n in range(0,N): # alle Zeiten
EJ[n] = np.conj(z[:,n]).dot(J.dot(z[:,n]))
q = 1
EJ = q * EJ
return EJ
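# In formula form: EJ[n] = q * <psi(t_n)| J |psi(t_n)>, evaluated as
# conj(z[:, n]) . (J . z[:, n]) for every stored time step.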
#EJ_lambda = np.zeros((N,1))*1j
#for n in range(0,N): # alle Zeiten
# EJ_lambda[n] = np.conj(x_lambda[:,n]).dot(J.dot(x_lambda[:,n]))
#EJ_lambda = q * EJ_lambda
SE1 = 0.25*(StromErwartungswert(x[0*36:(0+1)*36,0:N])+StromErwartungswert(x1[0*36:(0+1)*36,0:N])+StromErwartungswert(x2[0*36:(0+1)*36,0:N])+StromErwartungswert(x3[0*36:(0+1)*36,0:N]))
SE2 = 0.25*(StromErwartungswert(x[1*36:(1+1)*36,0:N])+StromErwartungswert(x1[1*36:(1+1)*36,0:N])+StromErwartungswert(x2[1*36:(1+1)*36,0:N])+StromErwartungswert(x3[1*36:(1+1)*36,0:N]))
# Mean value (time average):
def StromMittelwert(z):
J_t = StromErwartungswert(z)
M = 0.25*np.sum(J_t)/N
return M
#J_w = np.zeros((W,1)) # mean currents as a function of W
#w_lin = np.zeros((W,1))
#for w in range(0,W):
# J_w[w] = StromMittelwert(x[w*36:(w+1)*36,0:N])
# w_lin[w] = 1.5*w/(W-1) + 0.5
# w_lin[w] = w +1
#
#print('Strommittelwerte (omega aufsteigend):', J_w)
#M_lambda = np.sum(EJ_lambda)/N
#print('Mittelwert:', M,
#'Mittelwert(lambda):', M_lambda)
plt.plot(t, SE1.real, label= r'$\omega = 1.0 \, J/\hbar$', linewidth = 1)
plt.plot(t, SE2.real, label= r'$\omega = 2.0 \, J/\hbar$', linewidth = 0.75)
#plt.plot((76,76),(-0.03,0.03),'k-.', label= r'$t_\text{max}$', linewidth = 0.85)
#plt.plot(t, StromErwartungswert(x[0*36:(0+1)*36,0:N]).real, label= r'$\omega = 1.0$')
#plt.plot(t, StromErwartungswert(x[1*36:(1+1)*36,0:N]).real, label= r'$\omega = 2.0$')
#plt.plot(t, StromErwartungswert(x[8*36:(8+1)*36,0:N]).real, label= r'$\omega = 1.4$')
#plt.plot(t, StromErwartungswert(x[10*36:(10+1)*36,0:N]).real, label= r'$\omega = 1.6$')
#plt.plot(t, StromErwartungswert(x[12*36:(12+1)*36,0:N]).real, label= r'$\omega = 1.8$')
#plt.plot(t, StromErwartungswert(x[14*36:(14+1)*36,0:N]).real, label= r'$\omega = 2.0$')
#plt.plot(w_lin, J_w, 'rx')
#, label=r'$\bra{\psi_0(t)} \hat{J} \ket{\psi_0(t)}$')
plt.legend(loc='best')
plt.xlim(t0,t1)
plt.ylim(-0.0075, 0.01)
plt.xlabel(r'$t/\tfrac{\hbar}{J}$')
plt.ylabel(r'$I/\tfrac{J \symup{e}}{\hbar}$')
plt.grid()
plt.tight_layout()
plt.savefig('Plots/U0_E01_schoen.pdf')
plt.savefig('build/Hubb_Strom.pdf')
#plt.close()
#plt.plot(w_lin, J_w, 'rx')
#plt.xlabel(r'$\omega/\frac{J}{\hbar}$')
#plt.ylabel(r'$I/J \cdot x$')
#plt.savefig('Plots/U0_E01_omega_schoen.pdf')
| mit |
nmayorov/scikit-learn | sklearn/base.py | 18 | 17981 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as _ChangedBehaviorWarning
@deprecated("ChangedBehaviorWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class ChangedBehaviorWarning(_ChangedBehaviorWarning):
pass
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to them-selves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
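# Illustrative usage sketch (editorial addition, not part of the original
# module). Assuming scikit-learn's LogisticRegression is importable, ``clone``
# copies the hyperparameters but none of the fitted state:
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> original = LogisticRegression(C=0.5)
# >>> copy = clone(original)
# >>> copy.get_params()['C']
# 0.5
# >>> hasattr(copy, 'coef_')   # the clone has never been fit
# False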
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
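# Illustrative sketch (editorial addition): nested parameters use the
# ``<component>__<parameter>`` form described above. Assumes a Pipeline with a
# step named 'svc'.
#
# >>> from sklearn.pipeline import Pipeline
# >>> from sklearn.svm import SVC
# >>> pipe = Pipeline([('svc', SVC())])
# >>> _ = pipe.set_params(svc__C=10.0)
# >>> pipe.get_params()['svc__C']
# 10.0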
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the regression
sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
| bsd-3-clause |
anomam/pvlib-python | pvlib/irradiance.py | 1 | 106789 | """
The ``irradiance`` module contains functions for modeling global
horizontal irradiance, direct normal irradiance, diffuse horizontal
irradiance, and total irradiance under various conditions.
"""
import datetime
from collections import OrderedDict
from functools import partial
import numpy as np
import pandas as pd
from pvlib import atmosphere, solarposition, tools
# see References section of grounddiffuse function
SURFACE_ALBEDOS = {'urban': 0.18,
'grass': 0.20,
'fresh grass': 0.26,
'soil': 0.17,
'sand': 0.40,
'snow': 0.65,
'fresh snow': 0.75,
'asphalt': 0.12,
'concrete': 0.30,
'aluminum': 0.85,
'copper': 0.74,
'fresh steel': 0.35,
'dirty steel': 0.08,
'sea': 0.06}
def get_extra_radiation(datetime_or_doy, solar_constant=1366.1,
method='spencer', epoch_year=2014, **kwargs):
"""
Determine extraterrestrial radiation from day of year.
Parameters
----------
datetime_or_doy : numeric, array, date, datetime, Timestamp, DatetimeIndex
Day of year, array of days of year, or datetime-like object
solar_constant : float, default 1366.1
The solar constant.
method : string, default 'spencer'
The method by which the ET radiation should be calculated.
Options include ``'pyephem', 'spencer', 'asce', 'nrel'``.
epoch_year : int, default 2014
The year in which a day of year input will be calculated. Only
applies to day of year input used with the pyephem or nrel
methods.
kwargs :
Passed to solarposition.nrel_earthsun_distance
Returns
-------
dni_extra : float, array, or Series
The extraterrestrial radiation present in watts per square meter
on a surface which is normal to the sun. Pandas Timestamp and
DatetimeIndex inputs will yield a Pandas TimeSeries. All other
inputs will yield a float or an array of floats.
References
----------
.. [1] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
Clear Sky Models: Implementation and Analysis", Sandia National
Laboratories, SAND2012-2389, 2012.
.. [2] <http://solardat.uoregon.edu/SolarRadiationBasics.html>, Eqs.
SR1 and SR2
.. [3] Partridge, G. W. and Platt, C. M. R. 1976. Radiative Processes
in Meteorology and Climatology.
.. [4] Duffie, J. A. and Beckman, W. A. 1991. Solar Engineering of
Thermal Processes, 2nd edn. J. Wiley and Sons, New York.
.. [5] ASCE, 2005. The ASCE Standardized Reference Evapotranspiration
Equation, Environmental and Water Resources Institute of the American
Civil Engineers, Ed. R. G. Allen et al.
"""
to_doy, to_datetimeindex, to_output = \
_handle_extra_radiation_types(datetime_or_doy, epoch_year)
# consider putting asce and spencer methods in their own functions
method = method.lower()
if method == 'asce':
B = solarposition._calculate_simple_day_angle(to_doy(datetime_or_doy),
offset=0)
RoverR0sqrd = 1 + 0.033 * np.cos(B)
elif method == 'spencer':
B = solarposition._calculate_simple_day_angle(to_doy(datetime_or_doy))
RoverR0sqrd = (1.00011 + 0.034221 * np.cos(B) + 0.00128 * np.sin(B) +
0.000719 * np.cos(2 * B) + 7.7e-05 * np.sin(2 * B))
elif method == 'pyephem':
times = to_datetimeindex(datetime_or_doy)
RoverR0sqrd = solarposition.pyephem_earthsun_distance(times) ** (-2)
elif method == 'nrel':
times = to_datetimeindex(datetime_or_doy)
RoverR0sqrd = \
solarposition.nrel_earthsun_distance(times, **kwargs) ** (-2)
else:
        raise ValueError('Invalid method: %s' % method)
Ea = solar_constant * RoverR0sqrd
Ea = to_output(Ea)
return Ea
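# Illustrative usage sketch (editorial addition): extraterrestrial radiation
# for day of year 80 using the default 'spencer' method. The result is a float
# close to the solar constant, since the Earth-Sun distance is near 1 AU in
# late March.
#
# >>> dni_extra = get_extra_radiation(80)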
def _handle_extra_radiation_types(datetime_or_doy, epoch_year):
# This block will set the functions that can be used to convert the
# inputs to either day of year or pandas DatetimeIndex, and the
# functions that will yield the appropriate output type. It's
# complicated because there are many day-of-year-like input types,
# and the different algorithms need different types. Maybe you have
# a better way to do it.
if isinstance(datetime_or_doy, pd.DatetimeIndex):
to_doy = tools._pandas_to_doy # won't be evaluated unless necessary
def to_datetimeindex(x): return x # noqa: E306
to_output = partial(pd.Series, index=datetime_or_doy)
elif isinstance(datetime_or_doy, pd.Timestamp):
to_doy = tools._pandas_to_doy
to_datetimeindex = \
tools._datetimelike_scalar_to_datetimeindex
to_output = tools._scalar_out
elif isinstance(datetime_or_doy,
(datetime.date, datetime.datetime, np.datetime64)):
to_doy = tools._datetimelike_scalar_to_doy
to_datetimeindex = \
tools._datetimelike_scalar_to_datetimeindex
to_output = tools._scalar_out
elif np.isscalar(datetime_or_doy): # ints and floats of various types
def to_doy(x): return x # noqa: E306
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._scalar_out
else: # assume that we have an array-like object of doy
def to_doy(x): return x # noqa: E306
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._array_out
return to_doy, to_datetimeindex, to_output
def aoi_projection(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth):
"""
Calculates the dot product of the sun position unit vector and the surface
normal unit vector; in other words, the cosine of the angle of incidence.
Usage note: When the sun is behind the surface the value returned is
negative. For many uses negative values must be set to zero.
Input all angles in degrees.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
Returns
-------
projection : numeric
Dot product of panel normal and solar angle.
"""
projection = (
tools.cosd(surface_tilt) * tools.cosd(solar_zenith) +
tools.sind(surface_tilt) * tools.sind(solar_zenith) *
tools.cosd(solar_azimuth - surface_azimuth))
try:
projection.name = 'aoi_projection'
except AttributeError:
pass
return projection
def aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth):
"""
Calculates the angle of incidence of the solar vector on a surface.
This is the angle between the solar vector and the surface normal.
Input all angles in degrees.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
Returns
-------
aoi : numeric
Angle of incidence in degrees.
"""
projection = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
aoi_value = np.rad2deg(np.arccos(projection))
try:
aoi_value.name = 'aoi'
except AttributeError:
pass
return aoi_value
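# Illustrative sketch (editorial addition): a surface tilted 30 degrees toward
# the south (azimuth 180) with the sun at 30 degrees zenith in the southern
# sky has its normal parallel to the solar vector, so the projection is ~1 and
# the angle of incidence is ~0 degrees.
#
# >>> aoi_projection(30, 180, 30, 180)   # ~1.0
# >>> aoi(30, 180, 30, 180)              # ~0.0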
def poa_horizontal_ratio(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth):
"""
Calculates the ratio of the beam components of the plane of array
irradiance and the horizontal irradiance.
Input all angles in degrees.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
Returns
-------
ratio : numeric
Ratio of the plane of array irradiance to the horizontal plane
irradiance
"""
cos_poa_zen = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_solar_zenith = tools.cosd(solar_zenith)
# ratio of tilted and horizontal beam irradiance
ratio = cos_poa_zen / cos_solar_zenith
try:
ratio.name = 'poa_ratio'
except AttributeError:
pass
return ratio
def beam_component(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni):
"""
Calculates the beam component of the plane of array irradiance.
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : numeric
Direct Normal Irradiance
Returns
-------
beam : numeric
Beam component
"""
beam = dni * aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
beam = np.maximum(beam, 0)
return beam
def get_total_irradiance(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990', **kwargs):
r"""
Determine total in-plane irradiance and its beam, sky diffuse and ground
reflected components, using the specified sky diffuse irradiance model.
.. math::
I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground}
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : numeric
Direct Normal Irradiance
ghi : numeric
Global horizontal irradiance
dhi : numeric
Diffuse horizontal irradiance
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance
airmass : None or numeric, default None
Airmass
albedo : numeric, default 0.25
Surface albedo
surface_type : None or String, default None
Surface type. See grounddiffuse.
model : String, default 'isotropic'
Irradiance model.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`perez`.
Returns
-------
total_irrad : OrderedDict or DataFrame
Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse',
'poa_sky_diffuse', 'poa_ground_diffuse'``.
"""
poa_sky_diffuse = get_sky_diffuse(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model,
model_perez=model_perez)
poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo,
surface_type)
aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse)
return irrads
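# Illustrative usage sketch (editorial addition) with made-up scalar inputs;
# the default isotropic sky model needs neither dni_extra nor airmass.
#
# >>> total = get_total_irradiance(surface_tilt=30, surface_azimuth=180,
# ...                              solar_zenith=30, solar_azimuth=180,
# ...                              dni=800., ghi=600., dhi=100.)
# >>> sorted(total.keys())
# ['poa_diffuse', 'poa_direct', 'poa_global', 'poa_ground_diffuse', 'poa_sky_diffuse']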
def get_sky_diffuse(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=None, airmass=None,
model='isotropic',
model_perez='allsitescomposite1990'):
r"""
Determine in-plane sky diffuse irradiance component
using the specified sky diffuse irradiance model.
Sky diffuse models include:
* isotropic (default)
* klucher
* haydavies
* reindl
* king
* perez
Parameters
----------
surface_tilt : numeric
Panel tilt from horizontal.
surface_azimuth : numeric
Panel azimuth from north.
solar_zenith : numeric
Solar zenith angle.
solar_azimuth : numeric
Solar azimuth angle.
dni : numeric
Direct Normal Irradiance
ghi : numeric
Global horizontal irradiance
dhi : numeric
Diffuse horizontal irradiance
dni_extra : None or numeric, default None
Extraterrestrial direct normal irradiance
airmass : None or numeric, default None
Airmass
model : String, default 'isotropic'
Irradiance model.
model_perez : String, default 'allsitescomposite1990'
See perez.
Returns
-------
poa_sky_diffuse : numeric
"""
model = model.lower()
if model == 'isotropic':
sky = isotropic(surface_tilt, dhi)
elif model == 'klucher':
sky = klucher(surface_tilt, surface_azimuth, dhi, ghi,
solar_zenith, solar_azimuth)
elif model == 'haydavies':
sky = haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'reindl':
sky = reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra,
solar_zenith, solar_azimuth)
elif model == 'king':
sky = king(surface_tilt, dhi, ghi, solar_zenith)
elif model == 'perez':
sky = perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model=model_perez)
else:
raise ValueError('invalid model selection {}'.format(model))
return sky
def poa_components(aoi, dni, poa_sky_diffuse, poa_ground_diffuse):
r'''
Determine in-plane irradiance components.
Combines DNI with sky diffuse and ground-reflected irradiance to calculate
total, direct and diffuse irradiance components in the plane of array.
Parameters
----------
aoi : numeric
Angle of incidence of solar rays with respect to the module
surface, from :func:`aoi`.
dni : numeric
Direct normal irradiance (W/m^2), as measured from a TMY file or
calculated with a clearsky model.
poa_sky_diffuse : numeric
Diffuse irradiance (W/m^2) in the plane of the modules, as
calculated by a diffuse irradiance translation function
poa_ground_diffuse : numeric
Ground reflected irradiance (W/m^2) in the plane of the modules,
as calculated by an albedo model (eg. :func:`grounddiffuse`)
Returns
-------
irrads : OrderedDict or DataFrame
Contains the following keys:
* ``poa_global`` : Total in-plane irradiance (W/m^2)
* ``poa_direct`` : Total in-plane beam irradiance (W/m^2)
* ``poa_diffuse`` : Total in-plane diffuse irradiance (W/m^2)
* ``poa_sky_diffuse`` : In-plane diffuse irradiance from sky (W/m^2)
* ``poa_ground_diffuse`` : In-plane diffuse irradiance from ground
(W/m^2)
Notes
------
Negative beam irradiation due to aoi :math:`> 90^{\circ}` or AOI
:math:`< 0^{\circ}` is set to zero.
'''
poa_direct = np.maximum(dni * np.cos(np.radians(aoi)), 0)
poa_diffuse = poa_sky_diffuse + poa_ground_diffuse
poa_global = poa_direct + poa_diffuse
irrads = OrderedDict()
irrads['poa_global'] = poa_global
irrads['poa_direct'] = poa_direct
irrads['poa_diffuse'] = poa_diffuse
irrads['poa_sky_diffuse'] = poa_sky_diffuse
irrads['poa_ground_diffuse'] = poa_ground_diffuse
if isinstance(poa_direct, pd.Series):
irrads = pd.DataFrame(irrads)
return irrads
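# Illustrative sketch (editorial addition) with made-up scalar values: the
# beam term is dni * cos(aoi), and the sky and ground diffuse terms are summed.
#
# >>> poa_components(aoi=20., dni=700., poa_sky_diffuse=80.,
# ...                poa_ground_diffuse=10.)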
def get_ground_diffuse(surface_tilt, ghi, albedo=.25, surface_type=None):
'''
Estimate diffuse irradiance from ground reflections given
irradiance, albedo, and surface tilt
Function to determine the portion of irradiance on a tilted surface
due to ground reflections. Any of the inputs may be DataFrames or
scalars.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
ghi : numeric
Global horizontal irradiance in W/m^2.
albedo : numeric, default 0.25
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1. Will be
overridden if surface_type is supplied.
surface_type: None or string, default None
If not None, overrides albedo. String can be one of 'urban',
'grass', 'fresh grass', 'snow', 'fresh snow', 'asphalt', 'concrete',
'aluminum', 'copper', 'fresh steel', 'dirty steel', 'sea'.
Returns
-------
grounddiffuse : numeric
Ground reflected irradiances in W/m^2.
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267.
The calculation is the last term of equations 3, 4, 7, 8, 10, 11, and 12.
.. [2] albedos from:
http://files.pvsyst.com/help/albedo.htm
and
http://en.wikipedia.org/wiki/Albedo
and
https://doi.org/10.1175/1520-0469(1972)029<0959:AOTSS>2.0.CO;2
'''
if surface_type is not None:
albedo = SURFACE_ALBEDOS[surface_type]
diffuse_irrad = ghi * albedo * (1 - np.cos(np.radians(surface_tilt))) * 0.5
try:
diffuse_irrad.name = 'diffuse_ground'
except AttributeError:
pass
return diffuse_irrad
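# Illustrative sketch (editorial addition): ground-reflected diffuse for a
# 30 degree tilt over grass; passing surface_type overrides the albedo
# argument with the tabulated value of 0.20.
#
# >>> get_ground_diffuse(30, 500., surface_type='grass')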
def isotropic(surface_tilt, dhi):
r'''
Determine diffuse irradiance from the sky on a tilted surface using
the isotropic sky model.
.. math::
I_{d} = DHI \frac{1 + \cos\beta}{2}
Hottel and Woertz's model treats the sky as a uniform source of
diffuse irradiance. Thus the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface can be found from the diffuse horizontal irradiance and the
tilt angle of the surface.
Parameters
----------
surface_tilt : numeric
Surface tilt angle in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
Returns
-------
diffuse : numeric
The sky diffuse component of the solar radiation.
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Hottel, H.C., Woertz, B.B., 1942. Evaluation of flat-plate solar
heat collector. Trans. ASME 64, 91.
'''
sky_diffuse = dhi * (1 + tools.cosd(surface_tilt)) * 0.5
return sky_diffuse
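# Illustrative sketch (editorial addition): a horizontal surface (tilt 0) sees
# the full DHI, while a vertical surface (tilt 90) sees half of it.
#
# >>> isotropic(0, 100.)    # -> 100.0
# >>> isotropic(90, 100.)   # -> ~50.0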
def klucher(surface_tilt, surface_azimuth, dhi, ghi, solar_zenith,
solar_azimuth):
r'''
Determine diffuse irradiance from the sky on a tilted surface
using Klucher's 1979 model
.. math::
I_{d} = DHI \frac{1 + \cos\beta}{2} (1 + F' \sin^3(\beta/2))
(1 + F' \cos^2\theta\sin^3\theta_z)
where
.. math::
F' = 1 - (I_{d0} / GHI)^2
Klucher's 1979 model determines the diffuse irradiance from the sky
(ground reflected irradiance is not included in this algorithm) on a
tilted surface using the surface tilt angle, surface azimuth angle,
diffuse horizontal irradiance, direct normal irradiance, global
horizontal irradiance, extraterrestrial irradiance, sun zenith
angle, and sun azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The Azimuth convention is defined as degrees
        east of north (e.g. North = 0, South=180, East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
ghi : numeric
        Global horizontal irradiance in W/m^2. GHI must be >=0.
solar_zenith : numeric
Apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The Azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
Returns
-------
diffuse : numeric
The sky diffuse component of the solar radiation.
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267
.. [2] Klucher, T.M., 1979. Evaluation of models to predict insolation on
tilted surfaces. Solar Energy 23 (2), 111-114.
'''
# zenith angle with respect to panel normal.
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0) # GH 526
# silence warning from 0 / 0
with np.errstate(invalid='ignore'):
F = 1 - ((dhi / ghi) ** 2)
try:
# fails with single point input
F.fillna(0, inplace=True)
except AttributeError:
F = np.where(np.isnan(F), 0, F)
term1 = 0.5 * (1 + tools.cosd(surface_tilt))
term2 = 1 + F * (tools.sind(0.5 * surface_tilt) ** 3)
term3 = 1 + F * (cos_tt ** 2) * (tools.sind(solar_zenith) ** 3)
sky_diffuse = dhi * term1 * term2 * term3
return sky_diffuse
def haydavies(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith=None, solar_azimuth=None, projection_ratio=None):
r'''
Determine diffuse irradiance from the sky on a tilted surface using
Hay & Davies' 1980 model
.. math::
I_{d} = DHI ( A R_b + (1 - A) (\frac{1 + \cos\beta}{2}) )
Hay and Davies' 1980 model determines the diffuse irradiance from
the sky (ground reflected irradiance is not included in this
algorithm) on a tilted surface using the surface tilt angle, surface
azimuth angle, diffuse horizontal irradiance, direct normal
irradiance, extraterrestrial irradiance, sun zenith angle, and sun
azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. The tilt angle is
defined as degrees from horizontal (e.g. surface facing up = 0,
surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. The azimuth
convention is defined as degrees east of north (e.g. North=0,
South=180, East=90, West=270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2.
dni : numeric
Direct normal irradiance in W/m^2.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : None or numeric, default None
Solar apparent (refraction-corrected) zenith angles in decimal
degrees. Must supply ``solar_zenith`` and ``solar_azimuth`` or
supply ``projection_ratio``.
solar_azimuth : None or numeric, default None
Solar azimuth angles in decimal degrees. Must supply
``solar_zenith`` and ``solar_azimuth`` or supply
``projection_ratio``.
projection_ratio : None or numeric, default None
Ratio of angle of incidence projection to solar zenith angle
projection. Must supply ``solar_zenith`` and ``solar_azimuth``
or supply ``projection_ratio``.
Returns
--------
sky_diffuse : numeric
The sky diffuse component of the solar radiation.
References
-----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Hay, J.E., Davies, J.A., 1980. Calculations of the solar
radiation incident on an inclined surface. In: Hay, J.E., Won, T.K.
(Eds.), Proc. of First Canadian Solar Radiation Data Workshop, 59.
Ministry of Supply and Services, Canada.
'''
    # if necessary, calculate ratio of tilted and horizontal beam irradiance
if projection_ratio is None:
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0) # GH 526
cos_solar_zenith = tools.cosd(solar_zenith)
Rb = cos_tt / np.maximum(cos_solar_zenith, 0.01745) # GH 432
else:
Rb = projection_ratio
# Anisotropy Index
AI = dni / dni_extra
# these are the () and [] sub-terms of the second term of eqn 7
term1 = 1 - AI
term2 = 0.5 * (1 + tools.cosd(surface_tilt))
sky_diffuse = dhi * (AI * Rb + term1 * term2)
sky_diffuse = np.maximum(sky_diffuse, 0)
return sky_diffuse
def reindl(surface_tilt, surface_azimuth, dhi, dni, ghi, dni_extra,
solar_zenith, solar_azimuth):
r'''
Determine diffuse irradiance from the sky on a tilted surface using
Reindl's 1990 model
.. math::
I_{d} = DHI (A R_b + (1 - A) (\frac{1 + \cos\beta}{2})
(1 + \sqrt{\frac{I_{hb}}{I_h}} \sin^3(\beta/2)) )
Reindl's 1990 model determines the diffuse irradiance from the sky
(ground reflected irradiance is not included in this algorithm) on a
tilted surface using the surface tilt angle, surface azimuth angle,
diffuse horizontal irradiance, direct normal irradiance, global
horizontal irradiance, extraterrestrial irradiance, sun zenith
angle, and sun azimuth angle.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. The tilt angle is
defined as degrees from horizontal (e.g. surface facing up = 0,
surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. The azimuth
convention is defined as degrees east of north (e.g. North = 0,
        South=180, East = 90, West = 270).
dhi : numeric
diffuse horizontal irradiance in W/m^2.
dni : numeric
direct normal irradiance in W/m^2.
ghi: numeric
Global irradiance in W/m^2.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
Apparent (refraction-corrected) zenith angles in decimal degrees.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. The azimuth convention is
defined as degrees east of north (e.g. North = 0, East = 90,
West = 270).
Returns
-------
poa_sky_diffuse : numeric
The sky diffuse component of the solar radiation.
Notes
-----
The poa_sky_diffuse calculation is generated from the Loutzenhiser et al.
    (2007) paper, equation 8. Note that the beam and ground reflectance
    portion of the equation has been removed, so this function generates ONLY
    the diffuse radiation from the sky and circumsolar region; the form of
    the equation therefore varies slightly from equation 8.
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Reindl, D.T., Beckmann, W.A., Duffie, J.A., 1990a. Diffuse
fraction correlations. Solar Energy 45(1), 1-7.
.. [3] Reindl, D.T., Beckmann, W.A., Duffie, J.A., 1990b. Evaluation of
hourly tilted surface radiation models. Solar Energy 45(1), 9-17.
'''
cos_tt = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
cos_tt = np.maximum(cos_tt, 0) # GH 526
# do not apply cos(zen) limit here (needed for HB below)
cos_solar_zenith = tools.cosd(solar_zenith)
    # ratio of tilted and horizontal beam irradiance
Rb = cos_tt / np.maximum(cos_solar_zenith, 0.01745) # GH 432
# Anisotropy Index
AI = dni / dni_extra
# DNI projected onto horizontal
HB = dni * cos_solar_zenith
HB = np.maximum(HB, 0)
# these are the () and [] sub-terms of the second term of eqn 8
term1 = 1 - AI
term2 = 0.5 * (1 + tools.cosd(surface_tilt))
term3 = 1 + np.sqrt(HB / ghi) * (tools.sind(0.5 * surface_tilt) ** 3)
sky_diffuse = dhi * (AI * Rb + term1 * term2 * term3)
sky_diffuse = np.maximum(sky_diffuse, 0)
return sky_diffuse
def king(surface_tilt, dhi, ghi, solar_zenith):
'''
Determine diffuse irradiance from the sky on a tilted surface using
the King model.
King's model determines the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, diffuse horizontal irradiance,
global horizontal irradiance, and sun zenith angle. Note that this
model is not well documented and has not been published in any
fashion (as of January 2012).
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. The tilt angle is
defined as degrees from horizontal (e.g. surface facing up = 0,
surface facing horizon = 90)
dhi : numeric
Diffuse horizontal irradiance in W/m^2.
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
Apparent (refraction-corrected) zenith angles in decimal degrees.
Returns
--------
poa_sky_diffuse : numeric
The diffuse component of the solar radiation.
'''
sky_diffuse = (dhi * ((1 + tools.cosd(surface_tilt))) / 2 + ghi *
((0.012 * solar_zenith - 0.04)) *
((1 - tools.cosd(surface_tilt))) / 2)
sky_diffuse = np.maximum(sky_diffuse, 0)
return sky_diffuse
def perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model='allsitescomposite1990', return_components=False):
'''
Determine diffuse irradiance from the sky on a tilted surface using
one of the Perez models.
Perez models determine the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, surface azimuth angle, diffuse
horizontal irradiance, direct normal irradiance, extraterrestrial
irradiance, sun zenith angle, sun azimuth angle, and relative (not
pressure-corrected) airmass. Optionally a selector may be used to
use any of Perez's model coefficient sets.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The azimuth convention is defined as degrees
        east of north (e.g. North = 0, South=180, East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
dni : numeric
Direct normal irradiance in W/m^2. DNI must be >=0.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
airmass : numeric
Relative (not pressure-corrected) airmass values. If AM is a
DataFrame it must be of the same size as all other DataFrame
inputs. AM must be >=0 (careful using the 1/sec(z) model of AM
generation)
model : string (optional, default='allsitescomposite1990')
A string which selects the desired set of Perez coefficients. If
model is not provided as an input, the default, '1990' will be
used. All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
return_components: bool (optional, default=False)
Flag used to decide whether to return the calculated diffuse components
or not.
Returns
--------
numeric, OrderedDict, or DataFrame
Return type controlled by `return_components` argument.
If ``return_components=False``, `sky_diffuse` is returned.
If ``return_components=True``, `diffuse_components` is returned.
sky_diffuse : numeric
The sky diffuse component of the solar radiation on a tilted
surface.
diffuse_components : OrderedDict (array input) or DataFrame (Series input)
Keys/columns are:
* sky_diffuse: Total sky diffuse
* isotropic
* circumsolar
* horizon
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
.. [3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
.. [4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030
'''
kappa = 1.041 # for solar_zenith in radians
z = np.radians(solar_zenith) # convert to radians
# delta is the sky's "brightness"
delta = dhi * airmass / dni_extra
# epsilon is the sky's "clearness"
with np.errstate(invalid='ignore'):
eps = ((dhi + dni) / dhi + kappa * (z ** 3)) / (1 + kappa * (z ** 3))
# numpy indexing below will not work with a Series
if isinstance(eps, pd.Series):
eps = eps.values
# Perez et al define clearness bins according to the following
# rules. 1 = overcast ... 8 = clear (these names really only make
# sense for small zenith angles, but...) these values will
    # eventually be used as indices for coefficient lookups
ebin = np.digitize(eps, (0., 1.065, 1.23, 1.5, 1.95, 2.8, 4.5, 6.2))
ebin = np.array(ebin) # GH 642
ebin[np.isnan(eps)] = 0
    # correct for 0 indexing in coefficient lookup
# later, ebin = -1 will yield nan coefficients
ebin -= 1
# The various possible sets of Perez coefficients are contained
# in a subfunction to clean up the code.
F1c, F2c = _get_perez_coefficients(model)
# results in invalid eps (ebin = -1) being mapped to nans
nans = np.array([np.nan, np.nan, np.nan])
F1c = np.vstack((F1c, nans))
F2c = np.vstack((F2c, nans))
F1 = (F1c[ebin, 0] + F1c[ebin, 1] * delta + F1c[ebin, 2] * z)
F1 = np.maximum(F1, 0)
F2 = (F2c[ebin, 0] + F2c[ebin, 1] * delta + F2c[ebin, 2] * z)
F2 = np.maximum(F2, 0)
A = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
A = np.maximum(A, 0)
B = tools.cosd(solar_zenith)
B = np.maximum(B, tools.cosd(85))
# Calculate Diffuse POA from sky dome
term1 = 0.5 * (1 - F1) * (1 + tools.cosd(surface_tilt))
term2 = F1 * A / B
term3 = F2 * tools.sind(surface_tilt)
sky_diffuse = np.maximum(dhi * (term1 + term2 + term3), 0)
# we've preserved the input type until now, so don't ruin it!
if isinstance(sky_diffuse, pd.Series):
sky_diffuse[np.isnan(airmass)] = 0
else:
sky_diffuse = np.where(np.isnan(airmass), 0, sky_diffuse)
if return_components:
diffuse_components = OrderedDict()
diffuse_components['sky_diffuse'] = sky_diffuse
# Calculate the different components
diffuse_components['isotropic'] = dhi * term1
diffuse_components['circumsolar'] = dhi * term2
diffuse_components['horizon'] = dhi * term3
# Set values of components to 0 when sky_diffuse is 0
mask = sky_diffuse == 0
if isinstance(sky_diffuse, pd.Series):
diffuse_components = pd.DataFrame(diffuse_components)
diffuse_components.loc[mask] = 0
else:
diffuse_components = {k: np.where(mask, 0, v) for k, v in
diffuse_components.items()}
return diffuse_components
else:
return sky_diffuse
def clearsky_index(ghi, clearsky_ghi, max_clearsky_index=2.0):
"""
Calculate the clearsky index.
The clearsky index is the ratio of global to clearsky global irradiance.
Negative and non-finite clearsky index values will be truncated to zero.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
clearsky_ghi : numeric
Modeled clearsky GHI
max_clearsky_index : numeric, default 2.0
Maximum value of the clearsky index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
Returns
-------
clearsky_index : numeric
Clearsky index
"""
clearsky_index = ghi / clearsky_ghi
# set +inf, -inf, and nans to zero
clearsky_index = np.where(~np.isfinite(clearsky_index), 0,
clearsky_index)
# but preserve nans in the input arrays
input_is_nan = ~np.isfinite(ghi) | ~np.isfinite(clearsky_ghi)
clearsky_index = np.where(input_is_nan, np.nan, clearsky_index)
clearsky_index = np.maximum(clearsky_index, 0)
clearsky_index = np.minimum(clearsky_index, max_clearsky_index)
# preserve input type
if isinstance(ghi, pd.Series):
clearsky_index = pd.Series(clearsky_index, index=ghi.index)
return clearsky_index
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065,
max_clearness_index=2.0):
"""
Calculate the clearness index.
The clearness index is the ratio of global to extraterrestrial
irradiance on a horizontal plane [1]_.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
True (not refraction-corrected) solar zenith angle in decimal
degrees.
extra_radiation : numeric
Irradiance incident at the top of the atmosphere
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt : numeric
Clearness index
References
----------
.. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical
Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
Institute, 1987.
"""
cos_zenith = tools.cosd(solar_zenith)
I0h = extra_radiation * np.maximum(cos_zenith, min_cos_zenith)
# consider adding
# with np.errstate(invalid='ignore', divide='ignore'):
# to kt calculation, but perhaps it's good to allow these
# warnings to the users that override min_cos_zenith
kt = ghi / I0h
kt = np.maximum(kt, 0)
kt = np.minimum(kt, max_clearness_index)
return kt
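# Illustrative sketch (editorial addition) with made-up values: GHI of
# 600 W/m^2, a true zenith of 30 degrees and ~1366 W/m^2 extraterrestrial
# irradiance give kt = 600 / (1366 * cos(30 deg)), roughly 0.51.
#
# >>> clearness_index(600., 30., 1366.)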
def clearness_index_zenith_independent(clearness_index, airmass,
max_clearness_index=2.0):
"""
Calculate the zenith angle independent clearness index.
See [1]_ for details.
Parameters
----------
clearness_index : numeric
Ratio of global to extraterrestrial irradiance on a horizontal
plane
airmass : numeric
Airmass
max_clearness_index : numeric, default 2.0
Maximum value of the clearness index. The default, 2.0, allows
for over-irradiance events typically seen in sub-hourly data.
NREL's SRRL Fortran code used 0.82 for hourly data.
Returns
-------
kt_prime : numeric
Zenith independent clearness index
References
----------
.. [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka,
(1992). "Dynamic Global-to-Direct Irradiance Conversion Models".
ASHRAE Transactions-Research Series, pp. 354-369
"""
# Perez eqn 1
kt_prime = clearness_index / _kt_kt_prime_factor(airmass)
kt_prime = np.maximum(kt_prime, 0)
kt_prime = np.minimum(kt_prime, max_clearness_index)
return kt_prime
def _kt_kt_prime_factor(airmass):
"""
Calculate the conversion factor between kt and kt prime.
Function is useful because DIRINT and GTI-DIRINT both use this.
"""
# consider adding
# airmass = np.maximum(airmass, 12) # GH 450
return 1.031 * np.exp(-1.4 / (0.9 + 9.4 / airmass)) + 0.1
def disc(ghi, solar_zenith, datetime_or_doy, pressure=101325,
min_cos_zenith=0.065, max_zenith=87, max_airmass=12):
"""
Estimate Direct Normal Irradiance from Global Horizontal Irradiance
using the DISC model.
The DISC algorithm converts global horizontal irradiance to direct
normal irradiance through empirical relationships between the global
and direct clearness indices.
The pvlib implementation limits the clearness index to 1.
The original report describing the DISC model [1]_ uses the
relative airmass rather than the absolute (pressure-corrected)
airmass. However, the NREL implementation of the DISC model [2]_
uses absolute airmass. PVLib Matlab also uses the absolute airmass.
pvlib python defaults to absolute airmass, but the relative airmass
can be used by supplying `pressure=None`.
Parameters
----------
ghi : numeric
Global horizontal irradiance in W/m^2.
solar_zenith : numeric
True (not refraction-corrected) solar zenith angles in decimal
degrees.
datetime_or_doy : int, float, array, pd.DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
pressure : None or numeric, default 101325
Site pressure in Pascal. If None, relative airmass is used
instead of absolute (pressure-corrected) airmass.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
max_airmass : numeric, default 12
Maximum value of the airmass to allow in Kn calculation.
Default value (12) comes from range over which Kn was fit
to airmass in the original paper.
Returns
-------
output : OrderedDict or DataFrame
Contains the following keys:
* ``dni``: The modeled direct normal irradiance
in W/m^2 provided by the
Direct Insolation Simulation Code (DISC) model.
* ``kt``: Ratio of global to extraterrestrial
irradiance on a horizontal plane.
* ``airmass``: Airmass
References
----------
.. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical
Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research
Institute, 1987.
.. [2] Maxwell, E. "DISC Model", Excel Worksheet.
https://www.nrel.gov/grid/solar-resource/disc.html
See Also
--------
dirint
"""
# this is the I0 calculation from the reference
# SSC uses solar constant = 1367.0 (checked 2018 08 15)
I0 = get_extra_radiation(datetime_or_doy, 1370., 'spencer')
kt = clearness_index(ghi, solar_zenith, I0, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
am = atmosphere.get_relative_airmass(solar_zenith, model='kasten1966')
if pressure is not None:
am = atmosphere.get_absolute_airmass(am, pressure)
Kn, am = _disc_kn(kt, am, max_airmass=max_airmass)
dni = Kn * I0
bad_values = (solar_zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
output = OrderedDict()
output['dni'] = dni
output['kt'] = kt
output['airmass'] = am
if isinstance(datetime_or_doy, pd.DatetimeIndex):
output = pd.DataFrame(output, index=datetime_or_doy)
return output
def _disc_kn(clearness_index, airmass, max_airmass=12):
"""
Calculate Kn for `disc`
Parameters
----------
clearness_index : numeric
airmass : numeric
max_airmass : float
airmass > max_airmass is set to max_airmass before being used
in calculating Kn.
Returns
-------
Kn : numeric
am : numeric
airmass used in the calculation of Kn. am <= max_airmass.
"""
# short names for equations
kt = clearness_index
am = airmass
am = np.minimum(am, max_airmass) # GH 450
# powers of kt will be used repeatedly, so compute only once
kt2 = kt * kt # about the same as kt ** 2
kt3 = kt2 * kt # 5-10x faster than kt ** 3
bools = (kt <= 0.6)
a = np.where(bools,
0.512 - 1.56*kt + 2.286*kt2 - 2.222*kt3,
-5.743 + 21.77*kt - 27.49*kt2 + 11.56*kt3)
b = np.where(bools,
0.37 + 0.962*kt,
41.4 - 118.5*kt + 66.05*kt2 + 31.9*kt3)
c = np.where(bools,
-0.28 + 0.932*kt - 2.048*kt2,
-47.01 + 184.2*kt - 222.0*kt2 + 73.81*kt3)
delta_kn = a + b * np.exp(c*am)
Knc = 0.866 - 0.122*am + 0.0121*am**2 - 0.000653*am**3 + 1.4e-05*am**4
Kn = Knc - delta_kn
return Kn, am
def dirint(ghi, solar_zenith, times, pressure=101325., use_delta_kt_prime=True,
temp_dew=None, min_cos_zenith=0.065, max_zenith=87):
"""
Determine DNI from GHI using the DIRINT modification of the DISC
model.
Implements the modified DISC model known as "DIRINT" introduced in
    [1]_. DIRINT predicts direct normal irradiance (DNI) from measured
global horizontal irradiance (GHI). DIRINT improves upon the DISC
model by using time-series GHI data and dew point temperature
information. The effectiveness of the DIRINT model improves with
each piece of information provided.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
solar_zenith : array-like
True (not refraction-corrected) solar_zenith angles in decimal
degrees.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2 provided by the
DIRINT model.
Notes
-----
    The DIRINT model requires time series data (i.e., one of the inputs must
be a vector of length > 2).
References
----------
.. [1] Perez, R., P. Ineichen, E. Maxwell, R. Seals and A. Zelenka,
(1992). "Dynamic Global-to-Direct Irradiance Conversion Models".
ASHRAE Transactions-Research Series, pp. 354-369
.. [2] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
Global Horizontal to Direct Normal Insolation", Technical Report No.
SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987.
"""
disc_out = disc(ghi, solar_zenith, times, pressure=pressure,
min_cos_zenith=min_cos_zenith, max_zenith=max_zenith)
airmass = disc_out['airmass']
kt = disc_out['kt']
kt_prime = clearness_index_zenith_independent(
kt, airmass, max_clearness_index=1)
delta_kt_prime = _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime,
times)
w = _temp_dew_dirint(temp_dew, times)
dirint_coeffs = _dirint_coeffs(times, kt_prime, solar_zenith, w,
delta_kt_prime)
# Perez eqn 5
dni = disc_out['dni'] * dirint_coeffs
return dni
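# Illustrative usage sketch (hypothetical helper, not part of pvlib): how
# `dirint` might be called on a short synthetic time series. The GHI, zenith
# and dew-point values below are arbitrary assumptions chosen only to exercise
# the function signature; they are not validation data.
def _example_dirint_usage():
    times = pd.date_range('2019-06-01 08:00', periods=4, freq='1h', tz='UTC')
    ghi = pd.Series([300., 500., 700., 800.], index=times)
    solar_zenith = pd.Series([70., 55., 40., 30.], index=times)
    # returns a Series of modeled DNI aligned with `times`
    return dirint(ghi, solar_zenith, times, temp_dew=10.)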
def _dirint_from_dni_ktprime(dni, kt_prime, solar_zenith, use_delta_kt_prime,
temp_dew):
"""
Calculate DIRINT DNI from supplied DISC DNI and Kt'.
Supports :py:func:`gti_dirint`
"""
times = dni.index
delta_kt_prime = _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime,
times)
w = _temp_dew_dirint(temp_dew, times)
dirint_coeffs = _dirint_coeffs(times, kt_prime, solar_zenith, w,
delta_kt_prime)
dni_dirint = dni * dirint_coeffs
return dni_dirint
def _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime, times):
"""
Calculate delta_kt_prime (Perez eqn 2 and eqn 3), or return a default value
for use with :py:func:`_dirint_bins`.
"""
if use_delta_kt_prime:
# Perez eqn 2
kt_next = kt_prime.shift(-1)
kt_previous = kt_prime.shift(1)
# replace nan with values that implement Perez Eq 3 for first and last
# positions. Use kt_previous and kt_next to handle series of length 1
kt_next.iloc[-1] = kt_previous.iloc[-1]
kt_previous.iloc[0] = kt_next.iloc[0]
delta_kt_prime = 0.5 * ((kt_prime - kt_next).abs().add(
(kt_prime - kt_previous).abs(),
fill_value=0))
else:
# do not change unless also modifying _dirint_bins
delta_kt_prime = pd.Series(-1, index=times)
return delta_kt_prime
def _temp_dew_dirint(temp_dew, times):
"""
Calculate precipitable water from surface dew point temp (Perez eqn 4),
or return a default value for use with :py:func:`_dirint_bins`.
"""
if temp_dew is not None:
# Perez eqn 4
w = pd.Series(np.exp(0.07 * temp_dew - 0.075), index=times)
else:
# do not change unless also modifying _dirint_bins
w = pd.Series(-1, index=times)
return w
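# Worked example of Perez eqn 4 (illustrative numbers only): a surface dew
# point of 10 C gives w = exp(0.07 * 10 - 0.075) = exp(0.625) ~= 1.87 cm of
# precipitable water, which falls into the second w_bin assigned below.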
def _dirint_coeffs(times, kt_prime, solar_zenith, w, delta_kt_prime):
"""
Determine the DISC to DIRINT multiplier `dirint_coeffs`.
dni = disc_out['dni'] * dirint_coeffs
Parameters
----------
times : pd.DatetimeIndex
kt_prime : Zenith-independent clearness index
solar_zenith : Solar zenith angle
w : precipitable water estimated from surface dew-point temperature
delta_kt_prime : stability index
Returns
-------
dirint_coeffs : array-like
"""
kt_prime_bin, zenith_bin, w_bin, delta_kt_prime_bin = \
_dirint_bins(times, kt_prime, solar_zenith, w, delta_kt_prime)
# get the coefficients
coeffs = _get_dirint_coeffs()
# subtract 1 to account for difference between MATLAB-style bin
# assignment and Python-style array lookup.
dirint_coeffs = coeffs[kt_prime_bin-1, zenith_bin-1,
delta_kt_prime_bin-1, w_bin-1]
# convert unassigned bins to nan
dirint_coeffs = np.where((kt_prime_bin == 0) | (zenith_bin == 0) |
(w_bin == 0) | (delta_kt_prime_bin == 0),
np.nan, dirint_coeffs)
return dirint_coeffs
def _dirint_bins(times, kt_prime, zenith, w, delta_kt_prime):
"""
Determine the bins for the DIRINT coefficients.
Parameters
----------
times : pd.DatetimeIndex
kt_prime : Zenith-independent clearness index
zenith : Solar zenith angle
w : precipitable water estimated from surface dew-point temperature
delta_kt_prime : stability index
Returns
-------
tuple of kt_prime_bin, zenith_bin, w_bin, delta_kt_prime_bin
"""
# @wholmgren: the following bin assignments use MATLAB's 1-indexing.
# Later, we'll subtract 1 to conform to Python's 0-indexing.
# Create kt_prime bins
kt_prime_bin = pd.Series(0, index=times, dtype=np.int64)
kt_prime_bin[(kt_prime >= 0) & (kt_prime < 0.24)] = 1
kt_prime_bin[(kt_prime >= 0.24) & (kt_prime < 0.4)] = 2
kt_prime_bin[(kt_prime >= 0.4) & (kt_prime < 0.56)] = 3
kt_prime_bin[(kt_prime >= 0.56) & (kt_prime < 0.7)] = 4
kt_prime_bin[(kt_prime >= 0.7) & (kt_prime < 0.8)] = 5
kt_prime_bin[(kt_prime >= 0.8) & (kt_prime <= 1)] = 6
# Create zenith angle bins
zenith_bin = pd.Series(0, index=times, dtype=np.int64)
zenith_bin[(zenith >= 0) & (zenith < 25)] = 1
zenith_bin[(zenith >= 25) & (zenith < 40)] = 2
zenith_bin[(zenith >= 40) & (zenith < 55)] = 3
zenith_bin[(zenith >= 55) & (zenith < 70)] = 4
zenith_bin[(zenith >= 70) & (zenith < 80)] = 5
zenith_bin[(zenith >= 80)] = 6
# Create the bins for w based on dew point temperature
w_bin = pd.Series(0, index=times, dtype=np.int64)
w_bin[(w >= 0) & (w < 1)] = 1
w_bin[(w >= 1) & (w < 2)] = 2
w_bin[(w >= 2) & (w < 3)] = 3
w_bin[(w >= 3)] = 4
w_bin[(w == -1)] = 5
# Create delta_kt_prime binning.
delta_kt_prime_bin = pd.Series(0, index=times, dtype=np.int64)
delta_kt_prime_bin[(delta_kt_prime >= 0) & (delta_kt_prime < 0.015)] = 1
delta_kt_prime_bin[(delta_kt_prime >= 0.015) &
(delta_kt_prime < 0.035)] = 2
delta_kt_prime_bin[(delta_kt_prime >= 0.035) & (delta_kt_prime < 0.07)] = 3
delta_kt_prime_bin[(delta_kt_prime >= 0.07) & (delta_kt_prime < 0.15)] = 4
delta_kt_prime_bin[(delta_kt_prime >= 0.15) & (delta_kt_prime < 0.3)] = 5
delta_kt_prime_bin[(delta_kt_prime >= 0.3) & (delta_kt_prime <= 1)] = 6
delta_kt_prime_bin[delta_kt_prime == -1] = 7
return kt_prime_bin, zenith_bin, w_bin, delta_kt_prime_bin
def dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, min_cos_zenith=0.065,
max_zenith=87):
"""
Determine DNI from GHI using the DIRINDEX model.
    The DIRINDEX model [1]_ modifies the DIRINT model implemented in
    :py:func:`pvlib.irradiance.dirint` by taking into account information
    from a clear sky model. It is recommended that ``ghi_clearsky`` be
    calculated using the Ineichen clear sky model
    :py:func:`pvlib.clearsky.ineichen` with ``perez_enhancement=True``.
The pvlib implementation limits the clearness index to 1.
Parameters
----------
ghi : array-like
Global horizontal irradiance in W/m^2.
ghi_clearsky : array-like
Global horizontal irradiance from clear sky model, in W/m^2.
dni_clearsky : array-like
Direct normal irradiance from clear sky model, in W/m^2.
zenith : array-like
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
times : DatetimeIndex
pressure : float or array-like, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
dni : array-like
The modeled direct normal irradiance in W/m^2.
Notes
-----
    The DIRINDEX model requires time series data (i.e., one of the inputs must
be a vector of length > 2).
References
----------
.. [1] Perez, R., Ineichen, P., Moore, K., Kmiecik, M., Chain, C., George,
R., & Vignola, F. (2002). A new operational model for satellite-derived
irradiances: description and validation. Solar Energy, 73(5), 307-317.
"""
dni_dirint = dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew, min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirint_clearsky = dirint(ghi_clearsky, zenith, times,
pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime,
temp_dew=temp_dew,
min_cos_zenith=min_cos_zenith,
max_zenith=max_zenith)
dni_dirindex = dni_clearsky * dni_dirint / dni_dirint_clearsky
dni_dirindex[dni_dirindex < 0] = 0.
return dni_dirindex
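# Illustrative usage sketch (hypothetical helper, not part of pvlib): DIRINDEX
# scales the clear-sky DNI by the ratio of DIRINT applied to measured GHI and
# DIRINT applied to clear-sky GHI. The clear-sky series would normally come
# from a model such as Ineichen; all numbers here are arbitrary assumptions.
def _example_dirindex_usage():
    times = pd.date_range('2019-06-01 10:00', periods=3, freq='1h', tz='UTC')
    ghi = pd.Series([600., 750., 800.], index=times)
    ghi_clearsky = pd.Series([700., 850., 900.], index=times)
    dni_clearsky = pd.Series([800., 900., 930.], index=times)
    zenith = pd.Series([45., 35., 30.], index=times)
    return dirindex(ghi, ghi_clearsky, dni_clearsky, zenith, times)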
def gti_dirint(poa_global, aoi, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, albedo=.25,
model='perez', model_perez='allsitescomposite1990',
calculate_gt_90=True, max_iterations=30):
"""
Determine GHI, DNI, DHI from POA global using the GTI DIRINT model.
The GTI DIRINT model is described in [1]_.
.. warning::
Model performance is poor for AOI greater than approximately
80 degrees `and` plane of array irradiance greater than
approximately 200 W/m^2.
Parameters
----------
poa_global : array-like
Plane of array global irradiance in W/m^2.
aoi : array-like
Angle of incidence of solar rays with respect to the module
surface normal.
solar_zenith : array-like
True (not refraction-corrected) solar zenith angles in decimal
degrees.
solar_azimuth : array-like
Solar azimuth angles in decimal degrees.
times : DatetimeIndex
Time indices for the input array-like data.
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
        be >=0 and <=360. The azimuth convention is defined as degrees
        east of north (e.g. North = 0, South = 180, East = 90, West = 270).
pressure : numeric, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
albedo : numeric, default 0.25
Surface albedo
    model : str, default 'perez'
        Irradiance model, passed to :py:func:`get_total_irradiance`.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`perez`.
calculate_gt_90 : bool, default True
Controls if the algorithm evaluates inputs with AOI >= 90 degrees.
If False, returns nan for AOI >= 90 degrees. Significant speed ups
can be achieved by setting this parameter to False.
max_iterations : int, default 30
Maximum number of iterations for the aoi < 90 deg algorithm.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``ghi``: the modeled global horizontal irradiance in W/m^2.
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
References
----------
.. [1] B. Marion, A model for deriving the direct normal and
diffuse horizontal irradiance from the global tilted
irradiance, Solar Energy 122, 1037-1046.
:doi:`10.1016/j.solener.2015.10.024`
"""
aoi_lt_90 = aoi < 90
# for AOI less than 90 degrees
ghi, dni, dhi, kt_prime = _gti_dirint_lt_90(
poa_global, aoi, aoi_lt_90, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime, temp_dew=temp_dew,
albedo=albedo, model=model, model_perez=model_perez,
max_iterations=max_iterations)
# for AOI greater than or equal to 90 degrees
if calculate_gt_90:
ghi_gte_90, dni_gte_90, dhi_gte_90 = _gti_dirint_gte_90(
poa_global, aoi, solar_zenith, solar_azimuth,
surface_tilt, times, kt_prime,
pressure=pressure, temp_dew=temp_dew, albedo=albedo)
else:
ghi_gte_90, dni_gte_90, dhi_gte_90 = np.nan, np.nan, np.nan
# put the AOI < 90 and AOI >= 90 conditions together
output = OrderedDict()
output['ghi'] = ghi.where(aoi_lt_90, ghi_gte_90)
output['dni'] = dni.where(aoi_lt_90, dni_gte_90)
output['dhi'] = dhi.where(aoi_lt_90, dhi_gte_90)
output = pd.DataFrame(output, index=times)
return output
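# Illustrative usage sketch (hypothetical helper, not part of pvlib):
# decomposing plane-of-array irradiance measured on a fixed, south-facing,
# 30-degree tilt. All values are arbitrary assumptions; real usage would pass
# measured POA data and solar position computed elsewhere.
def _example_gti_dirint_usage():
    times = pd.date_range('2019-06-01 09:00', periods=3, freq='1h', tz='UTC')
    poa_global = pd.Series([500., 700., 800.], index=times)
    aoi = pd.Series([50., 35., 25.], index=times)
    solar_zenith = pd.Series([55., 40., 30.], index=times)
    solar_azimuth = pd.Series([110., 135., 160.], index=times)
    # returns a DataFrame with 'ghi', 'dni', 'dhi' columns indexed by `times`
    return gti_dirint(poa_global, aoi, solar_zenith, solar_azimuth, times,
                      surface_tilt=30, surface_azimuth=180)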
def _gti_dirint_lt_90(poa_global, aoi, aoi_lt_90, solar_zenith, solar_azimuth,
times, surface_tilt, surface_azimuth, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, albedo=.25,
model='perez', model_perez='allsitescomposite1990',
max_iterations=30):
"""
GTI-DIRINT model for AOI < 90 degrees. See Marion 2015 Section 2.1.
See gti_dirint signature for parameter details.
"""
I0 = get_extra_radiation(times, 1370, 'spencer')
cos_zenith = tools.cosd(solar_zenith)
# I0h as in Marion 2015 eqns 1, 3
I0h = I0 * np.maximum(0.065, cos_zenith)
airmass = atmosphere.get_relative_airmass(solar_zenith, model='kasten1966')
airmass = atmosphere.get_absolute_airmass(airmass, pressure)
# these coeffs and diff variables and the loop below
# implement figure 1 of Marion 2015
    # make a coeffs array that is at least 30 elements long so that all
# coeffs can be assigned as specified in Marion 2015.
# slice below will limit iterations if necessary
coeffs = np.empty(max(30, max_iterations))
coeffs[0:3] = 1
coeffs[3:10] = 0.5
coeffs[10:20] = 0.25
coeffs[20:] = 0.125
coeffs = coeffs[:max_iterations] # covers case where max_iterations < 30
# initialize diff
diff = pd.Series(9999, index=times)
best_diff = diff
# initialize poa_global_i
poa_global_i = poa_global
for iteration, coeff in enumerate(coeffs):
# test if difference between modeled GTI and
# measured GTI (poa_global) is less than 1 W/m^2
# only test for aoi less than 90 deg
best_diff_lte_1 = best_diff <= 1
best_diff_lte_1_lt_90 = best_diff_lte_1[aoi_lt_90]
if best_diff_lte_1_lt_90.all():
# all aoi < 90 points have a difference <= 1, so break loop
break
# calculate kt and DNI from GTI
kt = clearness_index(poa_global_i, aoi, I0) # kt from Marion eqn 2
disc_dni = np.maximum(_disc_kn(kt, airmass)[0] * I0, 0)
kt_prime = clearness_index_zenith_independent(kt, airmass)
# dirint DNI in Marion eqn 3
dni = _dirint_from_dni_ktprime(disc_dni, kt_prime, solar_zenith,
use_delta_kt_prime, temp_dew)
# calculate DHI using Marion eqn 3 (identify 1st term on RHS as GHI)
# I0h has a minimum zenith projection, but multiplier of DNI does not
ghi = kt * I0h # Kt * I0 * max(0.065, cos(zen))
dhi = ghi - dni * cos_zenith # no cos(zen) restriction here
# following SSC code
dni = np.maximum(dni, 0)
ghi = np.maximum(ghi, 0)
dhi = np.maximum(dhi, 0)
# use DNI and DHI to model GTI
# GTI-DIRINT uses perez transposition model, but we allow for
# any model here
all_irrad = get_total_irradiance(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
dni, ghi, dhi, dni_extra=I0, airmass=airmass,
albedo=albedo, model=model, model_perez=model_perez)
gti_model = all_irrad['poa_global']
# calculate new diff
diff = gti_model - poa_global
# determine if the new diff is smaller in magnitude
# than the old diff
diff_abs = diff.abs()
smallest_diff = diff_abs < best_diff
# save the best differences
best_diff = diff_abs.where(smallest_diff, best_diff)
# on first iteration, the best values are the only values
if iteration == 0:
best_ghi = ghi
best_dni = dni
best_dhi = dhi
best_kt_prime = kt_prime
else:
            # save new GHI, DNI, DHI if they provide the best consistency
# otherwise use the older values.
best_ghi = ghi.where(smallest_diff, best_ghi)
best_dni = dni.where(smallest_diff, best_dni)
best_dhi = dhi.where(smallest_diff, best_dhi)
best_kt_prime = kt_prime.where(smallest_diff, best_kt_prime)
# calculate adjusted inputs for next iteration. Marion eqn 4
poa_global_i = np.maximum(1.0, poa_global_i - coeff * diff)
else:
# we are here because we ran out of coeffs to loop over and
# therefore we have exceeded max_iterations
import warnings
failed_points = best_diff[aoi_lt_90][~best_diff_lte_1_lt_90]
warnings.warn(
('%s points failed to converge after %s iterations. best_diff:\n%s'
% (len(failed_points), max_iterations, failed_points)),
RuntimeWarning)
# return the best data, whether or not the solution converged
return best_ghi, best_dni, best_dhi, best_kt_prime
def _gti_dirint_gte_90(poa_global, aoi, solar_zenith, solar_azimuth,
surface_tilt, times, kt_prime,
pressure=101325., temp_dew=None, albedo=.25):
"""
GTI-DIRINT model for AOI >= 90 degrees. See Marion 2015 Section 2.2.
See gti_dirint signature for parameter details.
"""
kt_prime_gte_90 = _gti_dirint_gte_90_kt_prime(aoi, solar_zenith,
solar_azimuth, times,
kt_prime)
I0 = get_extra_radiation(times, 1370, 'spencer')
airmass = atmosphere.get_relative_airmass(solar_zenith, model='kasten1966')
airmass = atmosphere.get_absolute_airmass(airmass, pressure)
kt = kt_prime_gte_90 * _kt_kt_prime_factor(airmass)
disc_dni = np.maximum(_disc_kn(kt, airmass)[0] * I0, 0)
dni_gte_90 = _dirint_from_dni_ktprime(disc_dni, kt_prime, solar_zenith,
False, temp_dew)
dni_gte_90_proj = dni_gte_90 * tools.cosd(solar_zenith)
cos_surface_tilt = tools.cosd(surface_tilt)
# isotropic sky plus ground diffuse
dhi_gte_90 = (
(2 * poa_global - dni_gte_90_proj * albedo * (1 - cos_surface_tilt)) /
(1 + cos_surface_tilt + albedo * (1 - cos_surface_tilt)))
ghi_gte_90 = dni_gte_90_proj + dhi_gte_90
return ghi_gte_90, dni_gte_90, dhi_gte_90
def _gti_dirint_gte_90_kt_prime(aoi, solar_zenith, solar_azimuth, times,
kt_prime):
"""
Determine kt' values to be used in GTI-DIRINT AOI >= 90 deg case.
See Marion 2015 Section 2.2.
For AOI >= 90 deg: average of the kt_prime values for 65 < AOI < 80
in each day's morning and afternoon. Morning and afternoon are treated
separately.
For AOI < 90 deg: NaN.
See gti_dirint signature for parameter details.
Returns
-------
kt_prime_gte_90 : Series
Index is `times`.
"""
# kt_prime values from DIRINT calculation for AOI < 90 case
# set the kt_prime from sunrise to AOI=90 to be equal to
# the kt_prime for 65 < AOI < 80 during the morning.
# similar for the afternoon. repeat for every day.
aoi_gte_90 = aoi >= 90
aoi_65_80 = (aoi > 65) & (aoi < 80)
zenith_lt_90 = solar_zenith < 90
morning = solar_azimuth < 180
afternoon = solar_azimuth > 180
aoi_65_80_morning = aoi_65_80 & morning
aoi_65_80_afternoon = aoi_65_80 & afternoon
zenith_lt_90_aoi_gte_90_morning = zenith_lt_90 & aoi_gte_90 & morning
zenith_lt_90_aoi_gte_90_afternoon = zenith_lt_90 & aoi_gte_90 & afternoon
kt_prime_gte_90 = []
for date, data in kt_prime.groupby(times.date):
kt_prime_am_avg = data[aoi_65_80_morning].mean()
kt_prime_pm_avg = data[aoi_65_80_afternoon].mean()
kt_prime_by_date = pd.Series(np.nan, index=data.index)
kt_prime_by_date[zenith_lt_90_aoi_gte_90_morning] = kt_prime_am_avg
kt_prime_by_date[zenith_lt_90_aoi_gte_90_afternoon] = kt_prime_pm_avg
kt_prime_gte_90.append(kt_prime_by_date)
kt_prime_gte_90 = pd.concat(kt_prime_gte_90)
return kt_prime_gte_90
def erbs(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87):
r"""
Estimate DNI and DHI from GHI using the Erbs model.
The Erbs model [1]_ estimates the diffuse fraction DF from global
horizontal irradiance through an empirical relationship between DF
and the ratio of GHI to extraterrestrial irradiance, Kt. The
function uses the diffuse fraction to compute DHI as
.. math::
DHI = DF \times GHI
DNI is then estimated as
.. math::
DNI = (GHI - DHI)/\cos(Z)
where Z is the zenith angle.
Parameters
----------
ghi: numeric
Global horizontal irradiance in W/m^2.
zenith: numeric
True (not refraction-corrected) zenith angles in decimal degrees.
datetime_or_doy : int, float, array, pd.DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
* ``kt``: Ratio of global to extraterrestrial irradiance
on a horizontal plane.
References
----------
.. [1] D. G. Erbs, S. A. Klein and J. A. Duffie, Estimation of the
diffuse radiation fraction for hourly, daily and monthly-average
global radiation, Solar Energy 28(4), pp 293-302, 1982. Eq. 1
See also
--------
dirint
disc
"""
dni_extra = get_extra_radiation(datetime_or_doy)
kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
# For Kt <= 0.22, set the diffuse fraction
df = 1 - 0.09*kt
# For Kt > 0.22 and Kt <= 0.8, set the diffuse fraction
df = np.where((kt > 0.22) & (kt <= 0.8),
0.9511 - 0.1604*kt + 4.388*kt**2 -
16.638*kt**3 + 12.336*kt**4,
df)
# For Kt > 0.8, set the diffuse fraction
df = np.where(kt > 0.8, 0.165, df)
dhi = df * ghi
dni = (ghi - dhi) / tools.cosd(zenith)
bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
# ensure that closure relationship remains valid
dhi = np.where(bad_values, ghi, dhi)
data = OrderedDict()
data['dni'] = dni
data['dhi'] = dhi
data['kt'] = kt
if isinstance(datetime_or_doy, pd.DatetimeIndex):
data = pd.DataFrame(data, index=datetime_or_doy)
return data
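# Illustrative usage sketch (hypothetical helper, not part of pvlib): the Erbs
# decomposition needs only GHI, zenith, and a day of year (or DatetimeIndex).
# The numbers are arbitrary assumptions.
def _example_erbs_usage():
    ghi = np.array([200., 600., 900.])
    zenith = np.array([70., 45., 20.])
    doy = 152  # roughly June 1
    # returns an OrderedDict with 'dni', 'dhi' and 'kt' arrays
    return erbs(ghi, zenith, doy)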
def liujordan(zenith, transmittance, airmass, dni_extra=1367.0):
'''
Determine DNI, DHI, GHI from extraterrestrial flux, transmittance,
and optical air mass number.
Liu and Jordan, 1960, developed a simplified direct radiation model.
DHI is from an empirical equation for diffuse radiation from Liu and
Jordan, 1960.
Parameters
----------
zenith: pd.Series
True (not refraction-corrected) zenith angles in decimal
degrees. If Z is a vector it must be of the same size as all
other vector inputs. Z must be >=0 and <=180.
transmittance: float
Atmospheric transmittance between 0 and 1.
    airmass: numeric
        Airmass values.
dni_extra: float, default 1367.0
Direct irradiance incident at the top of the atmosphere.
Returns
-------
irradiance: DataFrame
Modeled direct normal irradiance, direct horizontal irradiance,
and global horizontal irradiance in W/m^2
References
----------
.. [1] Campbell, G. S., J. M. Norman (1998) An Introduction to
Environmental Biophysics. 2nd Ed. New York: Springer.
.. [2] Liu, B. Y., R. C. Jordan, (1960). "The interrelationship and
characteristic distribution of direct, diffuse, and total solar
radiation". Solar Energy 4:1-19
'''
tau = transmittance
dni = dni_extra*tau**airmass
dhi = 0.3 * (1.0 - tau**airmass) * dni_extra * np.cos(np.radians(zenith))
ghi = dhi + dni * np.cos(np.radians(zenith))
irrads = OrderedDict()
irrads['ghi'] = ghi
irrads['dni'] = dni
irrads['dhi'] = dhi
if isinstance(ghi, pd.Series):
irrads = pd.DataFrame(irrads)
return irrads
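# Illustrative usage sketch (hypothetical helper, not part of pvlib): a single
# point with an assumed broadband transmittance of 0.7 and airmass of 1.5.
def _example_liujordan_usage():
    zenith = pd.Series([30.])
    airmass = pd.Series([1.5])
    # returns a DataFrame with 'ghi', 'dni', 'dhi' columns
    return liujordan(zenith, transmittance=0.7, airmass=airmass)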
def _get_perez_coefficients(perezmodel):
'''
Find coefficients for the Perez model
Parameters
----------
perezmodel : string (optional, default='allsitescomposite1990')
a character string which selects the desired set of Perez
coefficients. If model is not provided as an input, the default,
        '1990', will be used.
All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
Returns
--------
F1coeffs, F2coeffs : (array, array)
F1 and F2 coefficients for the Perez model
References
----------
.. [1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
.. [2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
.. [3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
.. [4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030
'''
coeffdict = {
'allsitescomposite1990': [
[-0.0080, 0.5880, -0.0620, -0.0600, 0.0720, -0.0220],
[0.1300, 0.6830, -0.1510, -0.0190, 0.0660, -0.0290],
[0.3300, 0.4870, -0.2210, 0.0550, -0.0640, -0.0260],
[0.5680, 0.1870, -0.2950, 0.1090, -0.1520, -0.0140],
[0.8730, -0.3920, -0.3620, 0.2260, -0.4620, 0.0010],
[1.1320, -1.2370, -0.4120, 0.2880, -0.8230, 0.0560],
[1.0600, -1.6000, -0.3590, 0.2640, -1.1270, 0.1310],
[0.6780, -0.3270, -0.2500, 0.1560, -1.3770, 0.2510]],
'allsitescomposite1988': [
[-0.0180, 0.7050, -0.071, -0.0580, 0.1020, -0.0260],
[0.1910, 0.6450, -0.1710, 0.0120, 0.0090, -0.0270],
[0.4400, 0.3780, -0.2560, 0.0870, -0.1040, -0.0250],
[0.7560, -0.1210, -0.3460, 0.1790, -0.3210, -0.0080],
[0.9960, -0.6450, -0.4050, 0.2600, -0.5900, 0.0170],
[1.0980, -1.2900, -0.3930, 0.2690, -0.8320, 0.0750],
[0.9730, -1.1350, -0.3780, 0.1240, -0.2580, 0.1490],
[0.6890, -0.4120, -0.2730, 0.1990, -1.6750, 0.2370]],
'sandiacomposite1988': [
[-0.1960, 1.0840, -0.0060, -0.1140, 0.1800, -0.0190],
[0.2360, 0.5190, -0.1800, -0.0110, 0.0200, -0.0380],
[0.4540, 0.3210, -0.2550, 0.0720, -0.0980, -0.0460],
[0.8660, -0.3810, -0.3750, 0.2030, -0.4030, -0.0490],
[1.0260, -0.7110, -0.4260, 0.2730, -0.6020, -0.0610],
[0.9780, -0.9860, -0.3500, 0.2800, -0.9150, -0.0240],
[0.7480, -0.9130, -0.2360, 0.1730, -1.0450, 0.0650],
[0.3180, -0.7570, 0.1030, 0.0620, -1.6980, 0.2360]],
'usacomposite1988': [
[-0.0340, 0.6710, -0.0590, -0.0590, 0.0860, -0.0280],
[0.2550, 0.4740, -0.1910, 0.0180, -0.0140, -0.0330],
[0.4270, 0.3490, -0.2450, 0.0930, -0.1210, -0.0390],
[0.7560, -0.2130, -0.3280, 0.1750, -0.3040, -0.0270],
[1.0200, -0.8570, -0.3850, 0.2800, -0.6380, -0.0190],
[1.0500, -1.3440, -0.3480, 0.2800, -0.8930, 0.0370],
[0.9740, -1.5070, -0.3700, 0.1540, -0.5680, 0.1090],
[0.7440, -1.8170, -0.2560, 0.2460, -2.6180, 0.2300]],
'france1988': [
[0.0130, 0.7640, -0.1000, -0.0580, 0.1270, -0.0230],
[0.0950, 0.9200, -0.1520, 0, 0.0510, -0.0200],
[0.4640, 0.4210, -0.2800, 0.0640, -0.0510, -0.0020],
[0.7590, -0.0090, -0.3730, 0.2010, -0.3820, 0.0100],
[0.9760, -0.4000, -0.4360, 0.2710, -0.6380, 0.0510],
[1.1760, -1.2540, -0.4620, 0.2950, -0.9750, 0.1290],
[1.1060, -1.5630, -0.3980, 0.3010, -1.4420, 0.2120],
[0.9340, -1.5010, -0.2710, 0.4200, -2.9170, 0.2490]],
'phoenix1988': [
[-0.0030, 0.7280, -0.0970, -0.0750, 0.1420, -0.0430],
[0.2790, 0.3540, -0.1760, 0.0300, -0.0550, -0.0540],
[0.4690, 0.1680, -0.2460, 0.0480, -0.0420, -0.0570],
[0.8560, -0.5190, -0.3400, 0.1760, -0.3800, -0.0310],
[0.9410, -0.6250, -0.3910, 0.1880, -0.3600, -0.0490],
[1.0560, -1.1340, -0.4100, 0.2810, -0.7940, -0.0650],
[0.9010, -2.1390, -0.2690, 0.1180, -0.6650, 0.0460],
[0.1070, 0.4810, 0.1430, -0.1110, -0.1370, 0.2340]],
'elmonte1988': [
[0.0270, 0.7010, -0.1190, -0.0580, 0.1070, -0.0600],
[0.1810, 0.6710, -0.1780, -0.0790, 0.1940, -0.0350],
[0.4760, 0.4070, -0.2880, 0.0540, -0.0320, -0.0550],
[0.8750, -0.2180, -0.4030, 0.1870, -0.3090, -0.0610],
[1.1660, -1.0140, -0.4540, 0.2110, -0.4100, -0.0440],
[1.1430, -2.0640, -0.2910, 0.0970, -0.3190, 0.0530],
[1.0940, -2.6320, -0.2590, 0.0290, -0.4220, 0.1470],
[0.1550, 1.7230, 0.1630, -0.1310, -0.0190, 0.2770]],
'osage1988': [
[-0.3530, 1.4740, 0.0570, -0.1750, 0.3120, 0.0090],
[0.3630, 0.2180, -0.2120, 0.0190, -0.0340, -0.0590],
[-0.0310, 1.2620, -0.0840, -0.0820, 0.2310, -0.0170],
[0.6910, 0.0390, -0.2950, 0.0910, -0.1310, -0.0350],
[1.1820, -1.3500, -0.3210, 0.4080, -0.9850, -0.0880],
[0.7640, 0.0190, -0.2030, 0.2170, -0.2940, -0.1030],
[0.2190, 1.4120, 0.2440, 0.4710, -2.9880, 0.0340],
[3.5780, 22.2310, -10.7450, 2.4260, 4.8920, -5.6870]],
'albuquerque1988': [
[0.0340, 0.5010, -0.0940, -0.0630, 0.1060, -0.0440],
[0.2290, 0.4670, -0.1560, -0.0050, -0.0190, -0.0230],
[0.4860, 0.2410, -0.2530, 0.0530, -0.0640, -0.0220],
[0.8740, -0.3930, -0.3970, 0.1810, -0.3270, -0.0370],
[1.1930, -1.2960, -0.5010, 0.2810, -0.6560, -0.0450],
[1.0560, -1.7580, -0.3740, 0.2260, -0.7590, 0.0340],
[0.9010, -4.7830, -0.1090, 0.0630, -0.9700, 0.1960],
[0.8510, -7.0550, -0.0530, 0.0600, -2.8330, 0.3300]],
'capecanaveral1988': [
[0.0750, 0.5330, -0.1240, -0.0670, 0.0420, -0.0200],
[0.2950, 0.4970, -0.2180, -0.0080, 0.0030, -0.0290],
[0.5140, 0.0810, -0.2610, 0.0750, -0.1600, -0.0290],
[0.7470, -0.3290, -0.3250, 0.1810, -0.4160, -0.0300],
[0.9010, -0.8830, -0.2970, 0.1780, -0.4890, 0.0080],
[0.5910, -0.0440, -0.1160, 0.2350, -0.9990, 0.0980],
[0.5370, -2.4020, 0.3200, 0.1690, -1.9710, 0.3100],
[-0.8050, 4.5460, 1.0720, -0.2580, -0.9500, 0.7530]],
'albany1988': [
[0.0120, 0.5540, -0.0760, -0.0520, 0.0840, -0.0290],
[0.2670, 0.4370, -0.1940, 0.0160, 0.0220, -0.0360],
[0.4200, 0.3360, -0.2370, 0.0740, -0.0520, -0.0320],
[0.6380, -0.0010, -0.2810, 0.1380, -0.1890, -0.0120],
[1.0190, -1.0270, -0.3420, 0.2710, -0.6280, 0.0140],
[1.1490, -1.9400, -0.3310, 0.3220, -1.0970, 0.0800],
[1.4340, -3.9940, -0.4920, 0.4530, -2.3760, 0.1170],
[1.0070, -2.2920, -0.4820, 0.3900, -3.3680, 0.2290]], }
array = np.array(coeffdict[perezmodel])
F1coeffs = array[:, 0:3]
F2coeffs = array[:, 3:7]
return F1coeffs, F2coeffs
def _get_dirint_coeffs():
"""
A place to stash the dirint coefficients.
Returns
-------
np.array with shape ``(6, 6, 7, 5)``.
Ordering is ``[kt_prime_bin, zenith_bin, delta_kt_prime_bin, w_bin]``
"""
# To allow for maximum copy/paste from the MATLAB 1-indexed code,
# we create and assign values to an oversized array.
# Then, we return the [1:, 1:, :, :] slice.
coeffs = np.zeros((7, 7, 7, 5))
coeffs[1, 1, :, :] = [
[0.385230, 0.385230, 0.385230, 0.462880, 0.317440],
[0.338390, 0.338390, 0.221270, 0.316730, 0.503650],
[0.235680, 0.235680, 0.241280, 0.157830, 0.269440],
[0.830130, 0.830130, 0.171970, 0.841070, 0.457370],
[0.548010, 0.548010, 0.478000, 0.966880, 1.036370],
[0.548010, 0.548010, 1.000000, 3.012370, 1.976540],
[0.582690, 0.582690, 0.229720, 0.892710, 0.569950]]
coeffs[1, 2, :, :] = [
[0.131280, 0.131280, 0.385460, 0.511070, 0.127940],
[0.223710, 0.223710, 0.193560, 0.304560, 0.193940],
[0.229970, 0.229970, 0.275020, 0.312730, 0.244610],
[0.090100, 0.184580, 0.260500, 0.687480, 0.579440],
[0.131530, 0.131530, 0.370190, 1.380350, 1.052270],
[1.116250, 1.116250, 0.928030, 3.525490, 2.316920],
[0.090100, 0.237000, 0.300040, 0.812470, 0.664970]]
coeffs[1, 3, :, :] = [
[0.587510, 0.130000, 0.400000, 0.537210, 0.832490],
[0.306210, 0.129830, 0.204460, 0.500000, 0.681640],
[0.224020, 0.260620, 0.334080, 0.501040, 0.350470],
[0.421540, 0.753970, 0.750660, 3.706840, 0.983790],
[0.706680, 0.373530, 1.245670, 0.864860, 1.992630],
[4.864400, 0.117390, 0.265180, 0.359180, 3.310820],
[0.392080, 0.493290, 0.651560, 1.932780, 0.898730]]
coeffs[1, 4, :, :] = [
[0.126970, 0.126970, 0.126970, 0.126970, 0.126970],
[0.810820, 0.810820, 0.810820, 0.810820, 0.810820],
[3.241680, 2.500000, 2.291440, 2.291440, 2.291440],
[4.000000, 3.000000, 2.000000, 0.975430, 1.965570],
[12.494170, 12.494170, 8.000000, 5.083520, 8.792390],
[21.744240, 21.744240, 21.744240, 21.744240, 21.744240],
[3.241680, 12.494170, 1.620760, 1.375250, 2.331620]]
coeffs[1, 5, :, :] = [
[0.126970, 0.126970, 0.126970, 0.126970, 0.126970],
[0.810820, 0.810820, 0.810820, 0.810820, 0.810820],
[3.241680, 2.500000, 2.291440, 2.291440, 2.291440],
[4.000000, 3.000000, 2.000000, 0.975430, 1.965570],
[12.494170, 12.494170, 8.000000, 5.083520, 8.792390],
[21.744240, 21.744240, 21.744240, 21.744240, 21.744240],
[3.241680, 12.494170, 1.620760, 1.375250, 2.331620]]
coeffs[1, 6, :, :] = [
[0.126970, 0.126970, 0.126970, 0.126970, 0.126970],
[0.810820, 0.810820, 0.810820, 0.810820, 0.810820],
[3.241680, 2.500000, 2.291440, 2.291440, 2.291440],
[4.000000, 3.000000, 2.000000, 0.975430, 1.965570],
[12.494170, 12.494170, 8.000000, 5.083520, 8.792390],
[21.744240, 21.744240, 21.744240, 21.744240, 21.744240],
[3.241680, 12.494170, 1.620760, 1.375250, 2.331620]]
coeffs[2, 1, :, :] = [
[0.337440, 0.337440, 0.969110, 1.097190, 1.116080],
[0.337440, 0.337440, 0.969110, 1.116030, 0.623900],
[0.337440, 0.337440, 1.530590, 1.024420, 0.908480],
[0.584040, 0.584040, 0.847250, 0.914940, 1.289300],
[0.337440, 0.337440, 0.310240, 1.435020, 1.852830],
[0.337440, 0.337440, 1.015010, 1.097190, 2.117230],
[0.337440, 0.337440, 0.969110, 1.145730, 1.476400]]
coeffs[2, 2, :, :] = [
[0.300000, 0.300000, 0.700000, 1.100000, 0.796940],
[0.219870, 0.219870, 0.526530, 0.809610, 0.649300],
[0.386650, 0.386650, 0.119320, 0.576120, 0.685460],
[0.746730, 0.399830, 0.470970, 0.986530, 0.785370],
[0.575420, 0.936700, 1.649200, 1.495840, 1.335590],
[1.319670, 4.002570, 1.276390, 2.644550, 2.518670],
[0.665190, 0.678910, 1.012360, 1.199940, 0.986580]]
coeffs[2, 3, :, :] = [
[0.378870, 0.974060, 0.500000, 0.491880, 0.665290],
[0.105210, 0.263470, 0.407040, 0.553460, 0.582590],
[0.312900, 0.345240, 1.144180, 0.854790, 0.612280],
[0.119070, 0.365120, 0.560520, 0.793720, 0.802600],
[0.781610, 0.837390, 1.270420, 1.537980, 1.292950],
[1.152290, 1.152290, 1.492080, 1.245370, 2.177100],
[0.424660, 0.529550, 0.966910, 1.033460, 0.958730]]
coeffs[2, 4, :, :] = [
[0.310590, 0.714410, 0.252450, 0.500000, 0.607600],
[0.975190, 0.363420, 0.500000, 0.400000, 0.502800],
[0.175580, 0.196250, 0.476360, 1.072470, 0.490510],
[0.719280, 0.698620, 0.657770, 1.190840, 0.681110],
[0.426240, 1.464840, 0.678550, 1.157730, 0.978430],
[2.501120, 1.789130, 1.387090, 2.394180, 2.394180],
[0.491640, 0.677610, 0.685610, 1.082400, 0.735410]]
coeffs[2, 5, :, :] = [
[0.597000, 0.500000, 0.300000, 0.310050, 0.413510],
[0.314790, 0.336310, 0.400000, 0.400000, 0.442460],
[0.166510, 0.460440, 0.552570, 1.000000, 0.461610],
[0.401020, 0.559110, 0.403630, 1.016710, 0.671490],
[0.400360, 0.750830, 0.842640, 1.802600, 1.023830],
[3.315300, 1.510380, 2.443650, 1.638820, 2.133990],
[0.530790, 0.745850, 0.693050, 1.458040, 0.804500]]
coeffs[2, 6, :, :] = [
[0.597000, 0.500000, 0.300000, 0.310050, 0.800920],
[0.314790, 0.336310, 0.400000, 0.400000, 0.237040],
[0.166510, 0.460440, 0.552570, 1.000000, 0.581990],
[0.401020, 0.559110, 0.403630, 1.016710, 0.898570],
[0.400360, 0.750830, 0.842640, 1.802600, 3.400390],
[3.315300, 1.510380, 2.443650, 1.638820, 2.508780],
[0.204340, 1.157740, 2.003080, 2.622080, 1.409380]]
coeffs[3, 1, :, :] = [
[1.242210, 1.242210, 1.242210, 1.242210, 1.242210],
[0.056980, 0.056980, 0.656990, 0.656990, 0.925160],
[0.089090, 0.089090, 1.040430, 1.232480, 1.205300],
[1.053850, 1.053850, 1.399690, 1.084640, 1.233340],
[1.151540, 1.151540, 1.118290, 1.531640, 1.411840],
[1.494980, 1.494980, 1.700000, 1.800810, 1.671600],
[1.018450, 1.018450, 1.153600, 1.321890, 1.294670]]
coeffs[3, 2, :, :] = [
[0.700000, 0.700000, 1.023460, 0.700000, 0.945830],
[0.886300, 0.886300, 1.333620, 0.800000, 1.066620],
[0.902180, 0.902180, 0.954330, 1.126690, 1.097310],
[1.095300, 1.075060, 1.176490, 1.139470, 1.096110],
[1.201660, 1.201660, 1.438200, 1.256280, 1.198060],
[1.525850, 1.525850, 1.869160, 1.985410, 1.911590],
[1.288220, 1.082810, 1.286370, 1.166170, 1.119330]]
coeffs[3, 3, :, :] = [
[0.600000, 1.029910, 0.859890, 0.550000, 0.813600],
[0.604450, 1.029910, 0.859890, 0.656700, 0.928840],
[0.455850, 0.750580, 0.804930, 0.823000, 0.911000],
[0.526580, 0.932310, 0.908620, 0.983520, 0.988090],
[1.036110, 1.100690, 0.848380, 1.035270, 1.042380],
[1.048440, 1.652720, 0.900000, 2.350410, 1.082950],
[0.817410, 0.976160, 0.861300, 0.974780, 1.004580]]
coeffs[3, 4, :, :] = [
[0.782110, 0.564280, 0.600000, 0.600000, 0.665740],
[0.894480, 0.680730, 0.541990, 0.800000, 0.669140],
[0.487460, 0.818950, 0.841830, 0.872540, 0.709040],
[0.709310, 0.872780, 0.908480, 0.953290, 0.844350],
[0.863920, 0.947770, 0.876220, 1.078750, 0.936910],
[1.280350, 0.866720, 0.769790, 1.078750, 0.975130],
[0.725420, 0.869970, 0.868810, 0.951190, 0.829220]]
coeffs[3, 5, :, :] = [
[0.791750, 0.654040, 0.483170, 0.409000, 0.597180],
[0.566140, 0.948990, 0.971820, 0.653570, 0.718550],
[0.648710, 0.637730, 0.870510, 0.860600, 0.694300],
[0.637630, 0.767610, 0.925670, 0.990310, 0.847670],
[0.736380, 0.946060, 1.117590, 1.029340, 0.947020],
[1.180970, 0.850000, 1.050000, 0.950000, 0.888580],
[0.700560, 0.801440, 0.961970, 0.906140, 0.823880]]
coeffs[3, 6, :, :] = [
[0.500000, 0.500000, 0.586770, 0.470550, 0.629790],
[0.500000, 0.500000, 1.056220, 1.260140, 0.658140],
[0.500000, 0.500000, 0.631830, 0.842620, 0.582780],
[0.554710, 0.734730, 0.985820, 0.915640, 0.898260],
[0.712510, 1.205990, 0.909510, 1.078260, 0.885610],
[1.899260, 1.559710, 1.000000, 1.150000, 1.120390],
[0.653880, 0.793120, 0.903320, 0.944070, 0.796130]]
coeffs[4, 1, :, :] = [
[1.000000, 1.000000, 1.050000, 1.170380, 1.178090],
[0.960580, 0.960580, 1.059530, 1.179030, 1.131690],
[0.871470, 0.871470, 0.995860, 1.141910, 1.114600],
[1.201590, 1.201590, 0.993610, 1.109380, 1.126320],
[1.065010, 1.065010, 0.828660, 0.939970, 1.017930],
[1.065010, 1.065010, 0.623690, 1.119620, 1.132260],
[1.071570, 1.071570, 0.958070, 1.114130, 1.127110]]
coeffs[4, 2, :, :] = [
[0.950000, 0.973390, 0.852520, 1.092200, 1.096590],
[0.804120, 0.913870, 0.980990, 1.094580, 1.042420],
[0.737540, 0.935970, 0.999940, 1.056490, 1.050060],
[1.032980, 1.034540, 0.968460, 1.032080, 1.015780],
[0.900000, 0.977210, 0.945960, 1.008840, 0.969960],
[0.600000, 0.750000, 0.750000, 0.844710, 0.899100],
[0.926800, 0.965030, 0.968520, 1.044910, 1.032310]]
coeffs[4, 3, :, :] = [
[0.850000, 1.029710, 0.961100, 1.055670, 1.009700],
[0.818530, 0.960010, 0.996450, 1.081970, 1.036470],
[0.765380, 0.953500, 0.948260, 1.052110, 1.000140],
[0.775610, 0.909610, 0.927800, 0.987800, 0.952100],
[1.000990, 0.881880, 0.875950, 0.949100, 0.893690],
[0.902370, 0.875960, 0.807990, 0.942410, 0.917920],
[0.856580, 0.928270, 0.946820, 1.032260, 0.972990]]
coeffs[4, 4, :, :] = [
[0.750000, 0.857930, 0.983800, 1.056540, 0.980240],
[0.750000, 0.987010, 1.013730, 1.133780, 1.038250],
[0.800000, 0.947380, 1.012380, 1.091270, 0.999840],
[0.800000, 0.914550, 0.908570, 0.999190, 0.915230],
[0.778540, 0.800590, 0.799070, 0.902180, 0.851560],
[0.680190, 0.317410, 0.507680, 0.388910, 0.646710],
[0.794920, 0.912780, 0.960830, 1.057110, 0.947950]]
coeffs[4, 5, :, :] = [
[0.750000, 0.833890, 0.867530, 1.059890, 0.932840],
[0.979700, 0.971470, 0.995510, 1.068490, 1.030150],
[0.858850, 0.987920, 1.043220, 1.108700, 1.044900],
[0.802400, 0.955110, 0.911660, 1.045070, 0.944470],
[0.884890, 0.766210, 0.885390, 0.859070, 0.818190],
[0.615680, 0.700000, 0.850000, 0.624620, 0.669300],
[0.835570, 0.946150, 0.977090, 1.049350, 0.979970]]
coeffs[4, 6, :, :] = [
[0.689220, 0.809600, 0.900000, 0.789500, 0.853990],
[0.854660, 0.852840, 0.938200, 0.923110, 0.955010],
[0.938600, 0.932980, 1.010390, 1.043950, 1.041640],
[0.843620, 0.981300, 0.951590, 0.946100, 0.966330],
[0.694740, 0.814690, 0.572650, 0.400000, 0.726830],
[0.211370, 0.671780, 0.416340, 0.297290, 0.498050],
[0.843540, 0.882330, 0.911760, 0.898420, 0.960210]]
coeffs[5, 1, :, :] = [
[1.054880, 1.075210, 1.068460, 1.153370, 1.069220],
[1.000000, 1.062220, 1.013470, 1.088170, 1.046200],
[0.885090, 0.993530, 0.942590, 1.054990, 1.012740],
[0.920000, 0.950000, 0.978720, 1.020280, 0.984440],
[0.850000, 0.908500, 0.839940, 0.985570, 0.962180],
[0.800000, 0.800000, 0.810080, 0.950000, 0.961550],
[1.038590, 1.063200, 1.034440, 1.112780, 1.037800]]
coeffs[5, 2, :, :] = [
[1.017610, 1.028360, 1.058960, 1.133180, 1.045620],
[0.920000, 0.998970, 1.033590, 1.089030, 1.022060],
[0.912370, 0.949930, 0.979770, 1.020420, 0.981770],
[0.847160, 0.935300, 0.930540, 0.955050, 0.946560],
[0.880260, 0.867110, 0.874130, 0.972650, 0.883420],
[0.627150, 0.627150, 0.700000, 0.774070, 0.845130],
[0.973700, 1.006240, 1.026190, 1.071960, 1.017240]]
coeffs[5, 3, :, :] = [
[1.028710, 1.017570, 1.025900, 1.081790, 1.024240],
[0.924980, 0.985500, 1.014100, 1.092210, 0.999610],
[0.828570, 0.934920, 0.994950, 1.024590, 0.949710],
[0.900810, 0.901330, 0.928830, 0.979570, 0.913100],
[0.761030, 0.845150, 0.805360, 0.936790, 0.853460],
[0.626400, 0.546750, 0.730500, 0.850000, 0.689050],
[0.957630, 0.985480, 0.991790, 1.050220, 0.987900]]
coeffs[5, 4, :, :] = [
[0.992730, 0.993880, 1.017150, 1.059120, 1.017450],
[0.975610, 0.987160, 1.026820, 1.075440, 1.007250],
[0.871090, 0.933190, 0.974690, 0.979840, 0.952730],
[0.828750, 0.868090, 0.834920, 0.905510, 0.871530],
[0.781540, 0.782470, 0.767910, 0.764140, 0.795890],
[0.743460, 0.693390, 0.514870, 0.630150, 0.715660],
[0.934760, 0.957870, 0.959640, 0.972510, 0.981640]]
coeffs[5, 5, :, :] = [
[0.965840, 0.941240, 0.987100, 1.022540, 1.011160],
[0.988630, 0.994770, 0.976590, 0.950000, 1.034840],
[0.958200, 1.018080, 0.974480, 0.920000, 0.989870],
[0.811720, 0.869090, 0.812020, 0.850000, 0.821050],
[0.682030, 0.679480, 0.632450, 0.746580, 0.738550],
[0.668290, 0.445860, 0.500000, 0.678920, 0.696510],
[0.926940, 0.953350, 0.959050, 0.876210, 0.991490]]
coeffs[5, 6, :, :] = [
[0.948940, 0.997760, 0.850000, 0.826520, 0.998470],
[1.017860, 0.970000, 0.850000, 0.700000, 0.988560],
[1.000000, 0.950000, 0.850000, 0.606240, 0.947260],
[1.000000, 0.746140, 0.751740, 0.598390, 0.725230],
[0.922210, 0.500000, 0.376800, 0.517110, 0.548630],
[0.500000, 0.450000, 0.429970, 0.404490, 0.539940],
[0.960430, 0.881630, 0.775640, 0.596350, 0.937680]]
coeffs[6, 1, :, :] = [
[1.030000, 1.040000, 1.000000, 1.000000, 1.049510],
[1.050000, 0.990000, 0.990000, 0.950000, 0.996530],
[1.050000, 0.990000, 0.990000, 0.820000, 0.971940],
[1.050000, 0.790000, 0.880000, 0.820000, 0.951840],
[1.000000, 0.530000, 0.440000, 0.710000, 0.928730],
[0.540000, 0.470000, 0.500000, 0.550000, 0.773950],
[1.038270, 0.920180, 0.910930, 0.821140, 1.034560]]
coeffs[6, 2, :, :] = [
[1.041020, 0.997520, 0.961600, 1.000000, 1.035780],
[0.948030, 0.980000, 0.900000, 0.950360, 0.977460],
[0.950000, 0.977250, 0.869270, 0.800000, 0.951680],
[0.951870, 0.850000, 0.748770, 0.700000, 0.883850],
[0.900000, 0.823190, 0.727450, 0.600000, 0.839870],
[0.850000, 0.805020, 0.692310, 0.500000, 0.788410],
[1.010090, 0.895270, 0.773030, 0.816280, 1.011680]]
coeffs[6, 3, :, :] = [
[1.022450, 1.004600, 0.983650, 1.000000, 1.032940],
[0.943960, 0.999240, 0.983920, 0.905990, 0.978150],
[0.936240, 0.946480, 0.850000, 0.850000, 0.930320],
[0.816420, 0.885000, 0.644950, 0.817650, 0.865310],
[0.742960, 0.765690, 0.561520, 0.700000, 0.827140],
[0.643870, 0.596710, 0.474460, 0.600000, 0.651200],
[0.971740, 0.940560, 0.714880, 0.864380, 1.001650]]
coeffs[6, 4, :, :] = [
[0.995260, 0.977010, 1.000000, 1.000000, 1.035250],
[0.939810, 0.975250, 0.939980, 0.950000, 0.982550],
[0.876870, 0.879440, 0.850000, 0.900000, 0.917810],
[0.873480, 0.873450, 0.751470, 0.850000, 0.863040],
[0.761470, 0.702360, 0.638770, 0.750000, 0.783120],
[0.734080, 0.650000, 0.600000, 0.650000, 0.715660],
[0.942160, 0.919100, 0.770340, 0.731170, 0.995180]]
coeffs[6, 5, :, :] = [
[0.952560, 0.916780, 0.920000, 0.900000, 1.005880],
[0.928620, 0.994420, 0.900000, 0.900000, 0.983720],
[0.913070, 0.850000, 0.850000, 0.800000, 0.924280],
[0.868090, 0.807170, 0.823550, 0.600000, 0.844520],
[0.769570, 0.719870, 0.650000, 0.550000, 0.733500],
[0.580250, 0.650000, 0.600000, 0.500000, 0.628850],
[0.904770, 0.852650, 0.708370, 0.493730, 0.949030]]
coeffs[6, 6, :, :] = [
[0.911970, 0.800000, 0.800000, 0.800000, 0.956320],
[0.912620, 0.682610, 0.750000, 0.700000, 0.950110],
[0.653450, 0.659330, 0.700000, 0.600000, 0.856110],
[0.648440, 0.600000, 0.641120, 0.500000, 0.695780],
[0.570000, 0.550000, 0.598800, 0.400000, 0.560150],
[0.475230, 0.500000, 0.518640, 0.339970, 0.520230],
[0.743440, 0.592190, 0.603060, 0.316930, 0.794390]]
return coeffs[1:, 1:, :, :]
def dni(ghi, dhi, zenith, clearsky_dni=None, clearsky_tolerance=1.1,
zenith_threshold_for_zero_dni=88.0,
zenith_threshold_for_clearsky_limit=80.0):
"""
Determine DNI from GHI and DHI.
When calculating the DNI from GHI and DHI the calculated DNI may be
unreasonably high or negative for zenith angles close to 90 degrees
(sunrise/sunset transitions). This function identifies unreasonable DNI
    values and sets them to NaN. If the clearsky DNI is given, unreasonably
    high values are cut off.
Parameters
----------
ghi : Series
Global horizontal irradiance.
dhi : Series
Diffuse horizontal irradiance.
zenith : Series
True (not refraction-corrected) zenith angles in decimal
degrees. Angles must be >=0 and <=180.
clearsky_dni : None or Series, default None
Clearsky direct normal irradiance.
clearsky_tolerance : float, default 1.1
If 'clearsky_dni' is given this parameter can be used to allow a
tolerance by how much the calculated DNI value can be greater than
the clearsky value before it is identified as an unreasonable value.
zenith_threshold_for_zero_dni : float, default 88.0
Non-zero DNI values for zenith angles greater than or equal to
'zenith_threshold_for_zero_dni' will be set to NaN.
zenith_threshold_for_clearsky_limit : float, default 80.0
DNI values for zenith angles greater than or equal to
        'zenith_threshold_for_clearsky_limit' and smaller than the
'zenith_threshold_for_zero_dni' that are greater than the clearsky DNI
(times allowed tolerance) will be corrected. Only applies if
'clearsky_dni' is not None.
Returns
-------
dni : Series
The modeled direct normal irradiance.
"""
# calculate DNI
dni = (ghi - dhi) / tools.cosd(zenith)
# cutoff negative values
dni[dni < 0] = float('nan')
# set non-zero DNI values for zenith angles >=
# zenith_threshold_for_zero_dni to NaN
dni[(zenith >= zenith_threshold_for_zero_dni) & (dni != 0)] = float('nan')
# correct DNI values for zenith angles greater or equal to the
# zenith_threshold_for_clearsky_limit and smaller than the
# upper_cutoff_zenith that are greater than the clearsky DNI (times
# clearsky_tolerance)
if clearsky_dni is not None:
max_dni = clearsky_dni * clearsky_tolerance
dni[(zenith >= zenith_threshold_for_clearsky_limit) &
(zenith < zenith_threshold_for_zero_dni) &
(dni > max_dni)] = max_dni
return dni
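# Illustrative usage sketch (hypothetical helper, not part of pvlib):
# recovering DNI from GHI and DHI via the closure relation, with the optional
# clear-sky cap applied near sunset. All numbers are arbitrary assumptions.
def _example_dni_usage():
    ghi = pd.Series([800., 150.])
    dhi = pd.Series([100., 120.])
    zenith = pd.Series([30., 85.])
    clearsky_dni = pd.Series([900., 200.])
    # the second point (zenith 85 deg) is capped at 1.1 * clearsky DNI
    return dni(ghi, dhi, zenith, clearsky_dni=clearsky_dni)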
| bsd-3-clause |
rahul-c1/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
    "error train: %d/200 ; errors novel regular: %d/40 ; "
    "errors novel abnormal: %d/20"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
dwhswenson/contact_map | contact_map/contact_map.py | 1 | 36427 | """
Contact map analysis.
"""
# Maintainer: David W.H. Swenson ([email protected])
# Licensed under LGPL, version 2.1 or greater
import collections
import itertools
import pickle
import json
import warnings
import numpy as np
import pandas as pd
import mdtraj as md
from .contact_count import ContactCount
from .atom_indexer import AtomSlicedIndexer, IdentityIndexer
from .py_2_3 import inspect_method_arguments
from .fix_parameters import ParameterFixer
# TODO:
# * switch to something where you can define the haystack -- the trick is to
# replace the current mdtraj._compute_neighbors with something that
# build a voxel list for the haystack, and then checks the voxel for each
# query atom. Doesn't look like anything is doing that now: neighbors
# doesn't use voxels, neighborlist doesn't limit the haystack
def _residue_and_index(residue, topology):
res = residue
try:
res_idx = res.index
except AttributeError:
res_idx = residue
res = topology.residue(res_idx)
return (res, res_idx)
def residue_neighborhood(residue, n=1):
"""Find n nearest neighbor residues
Parameters
----------
residue : mdtraj.Residue
this residue
n : positive int
number of neighbors to find
Returns
-------
list of int
neighbor residue numbers
"""
neighborhood = set([residue.index+i for i in range(-n, n+1)])
chain = set([res.index for res in residue.chain.residues])
# we could probably choose an faster approach here, but this is pretty
# good, and it only gets run once per residue
return [idx for idx in neighborhood if idx in chain]
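# Illustrative sketch (hypothetical helper and file name, not part of the
# library): the neighborhood is clipped to the residue's own chain, so
# termini return fewer indices than interior residues.
def _example_residue_neighborhood():
    topology = md.load('protein.pdb').topology  # 'protein.pdb' is a placeholder
    res = topology.residue(10)
    # e.g. [8, 9, 10, 11, 12] when residue 10 sits in the middle of its chain
    return residue_neighborhood(res, n=2)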
def _residue_for_atom(topology, atom_list):
return set([topology.atom(a).residue for a in atom_list])
def _residue_idx_for_atom(topology, atom_list):
return set([topology.atom(a).residue.index for a in atom_list])
def _range_from_iterable(iterable):
sort = sorted(iterable)
return (sort[0], sort[-1]+1)
class ContactsDict(object):
"""Dict-like object giving access to atom or residue contacts.
In some algorithmic situations, either the atom_contacts or the
residue_contacts might be used. Rather than use lots of if-statements,
or build an actual dictionary with the associated time cost of
generating both, this class provides an object that allows dict-like
access to either the atom or residue contacts.
Atom-based contacts (``contact.atom_contacts``) can be accessed with as
``contact_dict['atom']`` or ``contact_dict['atoms']``. Residue-based
contacts can be accessed with the keys ``'residue'``, ``'residues'``, or
``'res'``.
Parameters
----------
contacts : :class:`.ContactObject`
contact object with fundamental data
"""
def __init__(self, contacts):
self.contacts = contacts
def __getitem__(self, atom_or_res):
if atom_or_res in ["atom", "atoms"]:
contacts = self.contacts.atom_contacts
elif atom_or_res in ["residue", "residues", "res"]:
contacts = self.contacts.residue_contacts
else:
raise RuntimeError("Bad value for atom_or_res: " +
str(atom_or_res))
return contacts
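# Illustrative sketch (hypothetical helper, not part of the library): given any
# ContactObject-derived instance built elsewhere from a trajectory, the
# ``contacts`` property defined below returns a ContactsDict, so atom- and
# residue-level counters can be selected by string key.
def _example_contacts_dict_usage(contact_obj):
    atom_counter = contact_obj.contacts['atoms']  # same as .atom_contacts
    residue_counter = contact_obj.contacts['res']  # same as .residue_contacts
    return atom_counter, residue_counter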
class ContactObject(object):
"""
Generic object for contact map related analysis. Effectively abstract.
Much of what we need to do the contact map analysis is the same for all
analyses. It's in here.
"""
# Class default for use atom slice, None tries to be smart
_class_use_atom_slice = None
def __init__(self, topology, query, haystack, cutoff, n_neighbors_ignored):
# all inits required: no defaults for abstract class!
self._topology = topology
if query is None:
query = topology.select("not water and symbol != 'H'")
if haystack is None:
haystack = topology.select("not water and symbol != 'H'")
# make things private and accessible through read-only properties so
# they don't get accidentally changed after analysis
self._cutoff = cutoff
self._query = set(query)
self._haystack = set(haystack)
self._query_res_idx = set(_residue_idx_for_atom(topology, query))
self._haystack_res_idx = set(_residue_idx_for_atom(topology, haystack))
        # Make tuple for efficient lookup
all_atoms_set = set(query).union(set(haystack))
self._all_atoms = tuple(sorted(list(all_atoms_set)))
self._all_residues = _residue_idx_for_atom(self._topology,
all_atoms_set)
self._use_atom_slice = self._set_atom_slice(self._all_atoms)
has_indexer = getattr(self, 'indexer', None) is not None
if not has_indexer:
Indexer = {True: AtomSlicedIndexer,
False: IdentityIndexer}[self.use_atom_slice]
self.indexer = Indexer(topology, self._query, self._haystack,
self._all_atoms)
self._n_neighbors_ignored = n_neighbors_ignored
@classmethod
def from_contacts(cls, atom_contacts, residue_contacts, topology,
query=None, haystack=None, cutoff=0.45,
n_neighbors_ignored=2, indexer=None):
obj = cls.__new__(cls)
obj.indexer = indexer
super(cls, obj).__init__(topology, query, haystack, cutoff,
n_neighbors_ignored)
def get_contact_counter(contact):
if isinstance(contact, ContactCount):
return contact.counter
else:
return contact
obj._atom_contacts = get_contact_counter(atom_contacts)
obj._residue_contacts = get_contact_counter(residue_contacts)
return obj
def _set_atom_slice(self, all_atoms):
""" Set atom slice logic """
if (self._class_use_atom_slice is None and
not len(all_atoms) < self._topology.n_atoms):
# Don't use if there are no atoms to be sliced
return False
elif self._class_use_atom_slice is None:
            # Use if there are atoms to be sliced
return True
else:
# Use class default
return self._class_use_atom_slice
@property
def contacts(self):
""":class:`.ContactsDict` : contact dict for these contacts"""
return ContactsDict(self)
def __hash__(self):
return hash((self.cutoff, self.n_neighbors_ignored,
frozenset(self._query), frozenset(self._haystack),
self.topology))
def __eq__(self, other):
is_equal = (self.cutoff == other.cutoff
and self.n_neighbors_ignored == other.n_neighbors_ignored
and self.query == other.query
and self.haystack == other.haystack
and self.topology == other.topology)
return is_equal
def to_dict(self):
"""Convert object to a dict.
Keys should be strings; values should be (JSON-) serializable.
See also
--------
from_dict
"""
# need to explicitly convert possible np.int64 to int in several
dct = {
'topology': self._serialize_topology(self.topology),
'cutoff': self._cutoff,
'query': list([int(val) for val in self._query]),
'haystack': list([int(val) for val in self._haystack]),
'query_res_idx': list([int(val) for val
in self._query_res_idx]),
'haystack_res_idx': list([int(val) for val in
self._haystack_res_idx]),
'all_atoms': tuple(
[int(val) for val in self._all_atoms]),
'all_residues': tuple(
[int(val) for val in self._all_residues]),
'n_neighbors_ignored': self._n_neighbors_ignored,
'atom_contacts':
self._serialize_contact_counter(self._atom_contacts),
'residue_contacts':
self._serialize_contact_counter(self._residue_contacts),
'use_atom_slice': self._use_atom_slice}
return dct
@classmethod
def from_dict(cls, dct):
"""Create object from dict.
Parameters
----------
dct : dict
dict-formatted serialization (see to_dict for details)
See also
--------
to_dict
"""
deserialize_set = set
deserialize_atom_to_residue_dct = lambda d: {int(k): d[k] for k in d}
deserialization_helpers = {
'topology': cls._deserialize_topology,
'atom_contacts': cls._deserialize_contact_counter,
'residue_contacts': cls._deserialize_contact_counter,
'query': deserialize_set,
'haystack': deserialize_set,
'query_res_idx': deserialize_set,
'haystack_res_idx': deserialize_set,
'all_atoms': deserialize_set,
'all_residues': deserialize_set,
'atom_idx_to_residue_idx': deserialize_atom_to_residue_dct
}
for key in deserialization_helpers:
if key in dct:
dct[key] = deserialization_helpers[key](dct[key])
kwarg_keys = inspect_method_arguments(cls.__init__)
set_keys = set(dct.keys())
missing = set(kwarg_keys) - set_keys
dct.update({k: None for k in missing})
instance = cls.__new__(cls)
for k in dct:
setattr(instance, "_" + k, dct[k])
return instance
@staticmethod
def _deserialize_topology(topology_json):
"""Create MDTraj topology from JSON-serialized version"""
table, bonds = json.loads(topology_json)
topology_df = pd.read_json(table)
topology = md.Topology.from_dataframe(topology_df,
np.array(bonds))
return topology
@staticmethod
def _serialize_topology(topology):
"""Serialize MDTraj topology (to JSON)"""
table, bonds = topology.to_dataframe()
json_tuples = (table.to_json(), bonds.tolist())
return json.dumps(json_tuples)
# TODO: adding a separate object for these frozenset counters will be
# useful for many things, and this serialization should be moved there
@staticmethod
def _serialize_contact_counter(counter):
"""JSON string from contact counter"""
# have to explicitly convert to int because json doesn't know how to
# serialize np.int64 objects, which we get in Python 3
serializable = {json.dumps([int(val) for val in key]): counter[key]
for key in counter}
return json.dumps(serializable)
@staticmethod
def _deserialize_contact_counter(json_string):
"""Contact counted from JSON string"""
dct = json.loads(json_string)
counter = collections.Counter({
frozenset(json.loads(key)): dct[key] for key in dct
})
return counter
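    # A minimal round-trip sketch for the two counter helpers above, assuming
    # the enclosing class is ContactObject; the counter value is invented:
    #
    #     example = collections.Counter({frozenset([0, 5]): 3})
    #     as_json = ContactObject._serialize_contact_counter(example)
    #     assert ContactObject._deserialize_contact_counter(as_json) == example
    #
    # Frozenset keys become JSON lists of plain ints on the way out and are
    # rebuilt as frozensets on the way in, which is why the explicit int()
    # conversion above is needed for np.int64 atom indices.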
def to_json(self):
"""JSON-serialized version of this object.
See also
--------
from_json
"""
dct = self.to_dict()
return json.dumps(dct)
@classmethod
def from_json(cls, json_string):
"""Create object from JSON string
Parameters
----------
json_string : str
JSON-serialized version of the object
See also
--------
to_json
"""
dct = json.loads(json_string)
return cls.from_dict(dct)
def _check_compatibility(self, other, err=AssertionError):
compatibility_attrs = ['cutoff', 'topology', 'query', 'haystack',
'n_neighbors_ignored']
failed_attr = {}
err_msg = ""
for attr in compatibility_attrs:
self_val = getattr(self, attr)
other_val = getattr(other, attr)
if self_val != other_val:
failed_attr[attr] = (self_val, other_val)
err_msg += " {attr}: {self} != {other}\n".format(
attr=attr, self=str(self_val), other=str(other_val)
)
msg = "Incompatible ContactObjects:\n"
msg += err_msg
if failed_attr and err is not None:
raise err(msg)
else:
return failed_attr
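    # Usage sketch for _check_compatibility (the two maps are placeholders;
    # the attribute names are the ones actually compared above):
    #
    #     map_a._check_compatibility(map_b)            # raises on mismatch
    #     map_a._check_compatibility(map_b, err=None)  # returns {attr: (a, b)}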
def save_to_file(self, filename, mode="w"):
"""Save this object to the given file.
Parameters
----------
filename : string
the file to write to
mode : 'w' or 'a'
            file writing mode. Use 'w' to overwrite, 'a' to append. Note
            that the binary flag ('b') is added to the mode automatically.
See also
--------
from_file : load from generated file
"""
with open(filename, mode+"b") as f:
pickle.dump(self, f)
@classmethod
def from_file(cls, filename):
"""Load this object from a given file
Parameters
----------
filename : string
the file to read from
Returns
-------
:class:`.ContactObject`:
the reloaded object
See also
--------
save_to_file : save to a file
"""
with open(filename, "rb") as f:
reloaded = pickle.load(f)
return reloaded
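    # Hedged persistence sketch for save_to_file / from_file; the file name is
    # arbitrary and ``freq`` stands for any already-built instance:
    #
    #     freq.save_to_file("contacts.p")
    #     reloaded = ContactFrequency.from_file("contacts.p")
    #
    # Since this uses plain pickle, only load files from trusted sources.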
def __sub__(self, other):
return ContactDifference(positive=self, negative=other)
@property
def cutoff(self):
"""float : cutoff distance for contacts, in nanometers"""
return self._cutoff
@property
def n_neighbors_ignored(self):
"""int : number of neighbor residues (in same chain) to ignore"""
return self._n_neighbors_ignored
@property
def query(self):
"""list of int : indices of atoms to include as query"""
return list(self._query)
@property
def haystack(self):
"""list of int : indices of atoms to include as haystack"""
return list(self._haystack)
@property
def all_atoms(self):
"""list of int: all atom indices used in the contact map"""
return list(self._all_atoms)
@property
def topology(self):
"""
:class:`mdtraj.Topology` :
topology object for this system
The topology includes information about the atoms, how they are
grouped into residues, and how the residues are grouped into
chains.
"""
return self._topology
@property
def use_atom_slice(self):
"""bool : Indicates if `mdtraj.atom_slice()` is used before calculating
the contact map"""
return self._use_atom_slice
@property
def _residue_ignore_atom_idxs(self):
"""dict : maps query residue index to atom indices to ignore"""
all_atoms_set = set(self._all_atoms)
result = {}
for residue_idx in self.indexer.residue_query_atom_idxs.keys():
residue = self.topology.residue(residue_idx)
            # Several steps to go from residue indices -> atom indices
ignore_residue_idxs = residue_neighborhood(
residue,
self._n_neighbors_ignored
)
ignore_residues = [self.topology.residue(idx)
for idx in ignore_residue_idxs]
ignore_atoms = sum([list(res.atoms)
for res in ignore_residues], [])
ignore_atom_idxs = self.indexer.ignore_atom_idx(ignore_atoms,
all_atoms_set)
result[residue_idx] = ignore_atom_idxs
return result
@property
def haystack_residues(self):
"""list : residues for atoms in the haystack"""
return _residue_for_atom(self.topology, self.haystack)
@property
def query_residues(self):
"""list : residues for atoms in the query"""
return _residue_for_atom(self.topology, self.query)
@property
def query_range(self):
"""return an tuple with the (min, max+1) of query"""
return _range_from_iterable(self.query)
@property
def haystack_range(self):
"""return an tuple with the (min, max+1) of haystack"""
return _range_from_iterable(self.haystack)
@property
def haystack_residue_range(self):
"""(int, int): min and (max + 1) of haystack residue indices"""
return _range_from_iterable(self._haystack_res_idx)
@property
def query_residue_range(self):
"""(int, int): min and (max + 1) of query residue indices"""
return _range_from_iterable(self._query_res_idx)
def most_common_atoms_for_residue(self, residue):
"""
Most common atom contact pairs for contacts with the given residue
Parameters
----------
residue : Residue or int
the Residue object or index representing the residue for which
the most common atom contact pairs will be calculated
Returns
-------
list :
            Atom contact pairs involving the given residue, in order of
            frequency.
Referring to the list as ``l``, each element of the list
``l[e]`` consists of two parts: ``l[e][0]`` is a list containing
the two MDTraj Atom objects that make up the contact, and
``l[e][1]`` is the measure of how often the contact occurs.
"""
residue = _residue_and_index(residue, self.topology)[0]
residue_atoms = set(atom.index for atom in residue.atoms)
results = []
for atoms, number in self.atom_contacts.most_common_idx():
atoms_in_residue = atoms & residue_atoms
if atoms_in_residue:
as_atoms = [self.topology.atom(a) for a in atoms]
results += [(as_atoms, number)]
return results
def most_common_atoms_for_contact(self, contact_pair):
"""
Most common atom contacts for a given residue contact pair
Parameters
----------
contact_pair : length 2 list of Residue or int
the residue contact pair for which the most common atom contact
pairs will be calculated
Returns
-------
list :
Atom contact pairs for the residue contact pair, in order of
frequency. Referring to the list as ``l``, each element of the
list ``l[e]`` consists of two parts: ``l[e][0]`` is a list
containing the two MDTraj Atom objects that make up the contact,
and ``l[e][1]`` is the measure of how often the contact occurs.
"""
contact_pair = list(contact_pair)
res_1 = _residue_and_index(contact_pair[0], self.topology)[0]
res_2 = _residue_and_index(contact_pair[1], self.topology)[0]
atom_idxs_1 = set(atom.index for atom in res_1.atoms)
atom_idxs_2 = set(atom.index for atom in res_2.atoms)
all_atom_pairs = [
frozenset(pair)
for pair in itertools.product(atom_idxs_1, atom_idxs_2)
]
result = [([self.topology.atom(idx) for idx in contact[0]], contact[1])
for contact in self.atom_contacts.most_common_idx()
if frozenset(contact[0]) in all_atom_pairs]
return result
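    # Illustrative calls for the two "most common" helpers above, assuming
    # ``freq`` is a ContactFrequency whose topology contains residues 10 and
    # 42 (the indices are made up):
    #
    #     freq.most_common_atoms_for_residue(10)
    #     freq.most_common_atoms_for_contact([10, 42])
    #
    # Each returns a list of ([Atom, Atom], count-or-frequency) entries,
    # ordered by how often that atom pair is in contact.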
def _contact_map(self, trajectory, frame_number, residue_query_atom_idxs,
residue_ignore_atom_idxs):
"""
Returns atom and residue contact maps for the given frame.
Parameters
----------
        trajectory : mdtraj.Trajectory
            the trajectory to analyze; the frame selected by ``frame_number``
            is used
        frame_number : int
            index of the frame within ``trajectory`` to analyze
residue_query_atom_idxs : dict
residue_ignore_atom_idxs : dict
Returns
-------
atom_contacts : collections.Counter
        residue_contacts : collections.Counter
"""
used_trajectory = self.indexer.slice_trajectory(trajectory)
neighborlist = md.compute_neighborlist(used_trajectory, self.cutoff,
frame_number)
contact_pairs = set([])
residue_pairs = set([])
haystack = self.indexer.haystack
atom_idx_to_residue_idx = self.indexer.atom_idx_to_residue_idx
for residue_idx in residue_query_atom_idxs:
ignore_atom_idxs = set(residue_ignore_atom_idxs[residue_idx])
query_idxs = residue_query_atom_idxs[residue_idx]
for atom_idx in query_idxs:
                # sets should make this fast, especially since neighbor_idxs
                # should be small and the set difference s - t has average
                # cost len(s)
neighbor_idxs = set(neighborlist[atom_idx])
contact_neighbors = neighbor_idxs - ignore_atom_idxs
contact_neighbors = contact_neighbors & haystack
# frozenset is unique key independent of order
# local_pairs = set(frozenset((atom_idx, neighb))
# for neighb in contact_neighbors)
local_pairs = set(map(
frozenset,
itertools.product([atom_idx], contact_neighbors)
))
contact_pairs |= local_pairs
# contact_pairs |= set(frozenset((atom_idx, neighb))
# for neighb in contact_neighbors)
local_residue_partners = set(atom_idx_to_residue_idx[a]
for a in contact_neighbors)
local_res_pairs = set(map(
frozenset,
itertools.product([residue_idx], local_residue_partners)
))
residue_pairs |= local_res_pairs
atom_contacts = collections.Counter(contact_pairs)
# residue_pairs = set(
# frozenset(self._atom_idx_to_residue_idx[aa] for aa in pair)
# for pair in contact_pairs
# )
residue_contacts = collections.Counter(residue_pairs)
return (atom_contacts, residue_contacts)
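    # Small illustration (toy indices, not real atom numbers) of why
    # frozensets are used as Counter keys above: the key is independent of the
    # order in which the two partners were found, so (i, j) and (j, i) count
    # as the same contact.
    #
    #     frozenset((3, 7)) == frozenset((7, 3))                     # True
    #     collections.Counter([frozenset((3, 7)), frozenset((7, 3))])
    #     # -> Counter({frozenset({3, 7}): 2})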
@property
def atom_contacts(self):
raise NotImplementedError()
@property
def residue_contacts(self):
raise NotImplementedError()
CONTACT_MAP_ERROR = (
"The ContactMap class has been removed. Please use ContactFrequency."
" For more, see: https://github.com/dwhswenson/contact_map/issues/82"
)
def ContactMap(*args, **kwargs): # -no-cov-
raise RuntimeError(CONTACT_MAP_ERROR)
class ContactFrequency(ContactObject):
"""
Contact frequency (atomic and residue) for a trajectory.
    The contact frequency is defined as the fraction of the trajectory during
    which a certain contact is made. This object calculates this quantity for
    all contacts with atoms in the `query` residue, with "contact" defined as
being within a certain cutoff distance.
Parameters
----------
trajectory : mdtraj.Trajectory
Trajectory (segment) to analyze
query : list of int
Indices of the atoms to be included as query. Default ``None``
means all atoms.
haystack : list of int
Indices of the atoms to be included as haystack. Default ``None``
means all atoms.
cutoff : float
Cutoff distance for contacts, in nanometers. Default 0.45.
n_neighbors_ignored : int
Number of neighboring residues (in the same chain) to ignore.
Default 2.
"""
# Default for use_atom_slice, None tries to be smart
_class_use_atom_slice = None
_pending_dep_msg = (
"ContactFrequency will be renamed to ContactMap in version 0.8. "
"Invoking it as ContactFrequency will be deprecated in 0.8. For "
"more, see https://github.com/dwhswenson/contact_map/issues/82"
)
def __init__(self, trajectory, query=None, haystack=None, cutoff=0.45,
n_neighbors_ignored=2):
warnings.warn(self._pending_dep_msg, PendingDeprecationWarning)
self._n_frames = len(trajectory)
super(ContactFrequency, self).__init__(trajectory.topology,
query, haystack, cutoff,
n_neighbors_ignored)
contacts = self._build_contact_map(trajectory)
(self._atom_contacts, self._residue_contacts) = contacts
@classmethod
def from_contacts(cls, atom_contacts, residue_contacts, n_frames,
topology, query=None, haystack=None, cutoff=0.45,
n_neighbors_ignored=2, indexer=None):
warnings.warn(cls._pending_dep_msg, PendingDeprecationWarning)
obj = super(ContactFrequency, cls).from_contacts(
atom_contacts, residue_contacts, topology, query, haystack,
cutoff, n_neighbors_ignored, indexer
)
obj._n_frames = n_frames
return obj
@classmethod
def from_dict(cls, dct):
warnings.warn(cls._pending_dep_msg, PendingDeprecationWarning)
return super(ContactFrequency, cls).from_dict(dct)
@classmethod
def from_file(cls, filename):
warnings.warn(cls._pending_dep_msg, PendingDeprecationWarning)
return super(ContactFrequency, cls).from_file(filename)
def __hash__(self):
return hash((super(ContactFrequency, self).__hash__(),
tuple(self._atom_contacts.items()),
tuple(self._residue_contacts.items()),
self.n_frames))
def __eq__(self, other):
is_equal = (super(ContactFrequency, self).__eq__(other)
and self._atom_contacts == other._atom_contacts
and self._residue_contacts == other._residue_contacts
and self.n_frames == other.n_frames)
return is_equal
def to_dict(self):
dct = super(ContactFrequency, self).to_dict()
dct.update({'n_frames': self.n_frames})
return dct
def _build_contact_map(self, trajectory):
# We actually build the contact map on a per-residue basis, although
        # we save it on a per-atom basis. This allows us to ignore
        # the n_neighbors_ignored nearest-neighbor residues.
# TODO: this whole thing should be cleaned up and should replace
# MDTraj's really slow old compute_contacts by using MDTraj's new
# neighborlists (unless the MDTraj people do that first).
atom_contacts_count = collections.Counter([])
residue_contacts_count = collections.Counter([])
# cache things that can be calculated once based on the topology
# (namely, which atom indices matter for each residue)
residue_ignore_atom_idxs = self._residue_ignore_atom_idxs
residue_query_atom_idxs = self.indexer.residue_query_atom_idxs
used_trajectory = self.indexer.slice_trajectory(trajectory)
for frame_num in range(len(trajectory)):
frame_contacts = self._contact_map(used_trajectory, frame_num,
residue_query_atom_idxs,
residue_ignore_atom_idxs)
frame_atom_contacts = frame_contacts[0]
frame_residue_contacts = frame_contacts[1]
atom_contacts_count.update(frame_atom_contacts)
residue_contacts_count += frame_residue_contacts
atom_contacts_count = \
self.indexer.convert_atom_contacts(atom_contacts_count)
return (atom_contacts_count, residue_contacts_count)
@property
def n_frames(self):
"""Number of frames in the mapped trajectory"""
return self._n_frames
def add_contact_frequency(self, other):
"""Add results from `other` to the internal counter.
Parameters
----------
other : :class:`.ContactFrequency`
            contact frequency made from the frames to add to this
            contact frequency
"""
self._check_compatibility(other)
self._atom_contacts += other._atom_contacts
self._residue_contacts += other._residue_contacts
self._n_frames += other._n_frames
def subtract_contact_frequency(self, other):
"""Subtracts results from `other` from internal counter.
Note that this is intended for the case that you're removing a
subtrajectory of the already-calculated trajectory. If you want to
compare two different contact frequency maps, use
:class:`.ContactDifference`.
Parameters
----------
other : :class:`.ContactFrequency`
contact frequency made from the frames to remove from this
contact frequency
"""
self._check_compatibility(other)
self._atom_contacts -= other._atom_contacts
self._residue_contacts -= other._residue_contacts
self._n_frames -= other._n_frames
@property
def atom_contacts(self):
"""Atoms pairs mapped to fraction of trajectory with that contact"""
return ContactCount(collections.Counter({
item[0]: float(item[1])/self.n_frames
for item in self._atom_contacts.items()
}), self.topology.atom, self.query_range, self.haystack_range,
self.topology.n_atoms)
@property
def residue_contacts(self):
"""Residue pairs mapped to fraction of trajectory with that contact"""
return ContactCount(collections.Counter({
item[0]: float(item[1])/self.n_frames
for item in self._residue_contacts.items()
}), self.topology.residue, self.query_residue_range,
self.haystack_residue_range, self.topology.n_residues)
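    # Rough numerical sketch of the normalization done by the two properties
    # above (numbers invented): a contact present in 30 of 100 frames is
    # reported with frequency 0.3, while the raw counts stay untouched in
    # self._atom_contacts / self._residue_contacts.
    #
    #     raw_count, n_frames = 30, 100
    #     frequency = float(raw_count) / n_frames   # 0.3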
class ContactDifference(ContactObject):
"""
Contact map comparison (atomic and residue).
This can compare single frames or entire trajectories (or even mix the
two!) While this can be directly instantiated by the user, the more
common way to make this object is by using the ``-`` operator, i.e.,
``diff = map_1 - map_2``.
"""
# Some class variables on how we handle mismatches, mainly for subclassing.
_allow_mismatched_atoms = False
_allow_mismatched_residues = False
_override_topology = True
def __init__(self, positive, negative):
self.positive = positive
self.negative = negative
fix_parameters = ParameterFixer(
allow_mismatched_atoms=self._allow_mismatched_atoms,
allow_mismatched_residues=self._allow_mismatched_residues,
override_topology=self._override_topology)
(topology, query,
haystack, cutoff,
n_neighbors_ignored) = fix_parameters.get_parameters(positive,
negative)
self._all_atoms_intersect = set(
positive._all_atoms).intersection(negative._all_atoms)
self._all_residues_intersect = set(
positive._all_residues).intersection(negative._all_residues)
super(ContactDifference, self).__init__(topology,
query,
haystack,
cutoff,
n_neighbors_ignored)
def to_dict(self):
"""Convert object to a dict.
Keys should be strings; values should be (JSON-) serializable.
See also
--------
from_dict
"""
return {
'positive': self.positive.to_json(),
'negative': self.negative.to_json(),
'positive_cls': self.positive.__class__.__name__,
'negative_cls': self.negative.__class__.__name__
}
@classmethod
def from_dict(cls, dct):
"""Create object from dict.
Parameters
----------
dct : dict
dict-formatted serialization (see to_dict for details)
See also
--------
to_dict
"""
# TODO: add searching for subclasses (http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/)
supported_classes = [ContactMap, ContactFrequency]
supported_classes_dict = {class_.__name__: class_
for class_ in supported_classes}
def rebuild(pos_neg):
class_name = dct[pos_neg + "_cls"]
try:
cls_ = supported_classes_dict[class_name]
except KeyError: # pragma: no cover
raise RuntimeError("Can't rebuild class " + class_name)
obj = cls_.from_json(dct[pos_neg])
return obj
positive = rebuild('positive')
negative = rebuild('negative')
return cls(positive, negative)
def __sub__(self, other):
raise NotImplementedError
def contact_map(self, *args, **kwargs): #pylint: disable=W0221
raise NotImplementedError
@classmethod
def from_contacts(self, *args, **kwargs): #pylint: disable=W0221
raise NotImplementedError
@property
def atom_contacts(self):
return self._get_filtered_sub(pos_count=self.positive.atom_contacts,
neg_count=self.negative.atom_contacts,
selection=self._all_atoms_intersect,
object_f=self.topology.atom,
max_size=self.topology.n_atoms)
@property
def residue_contacts(self):
return self._get_filtered_sub(pos_count=self.positive.residue_contacts,
neg_count=self.negative.residue_contacts,
selection=self._all_residues_intersect,
object_f=self.topology.residue,
max_size=self.topology.n_residues)
def _get_filtered_sub(self, pos_count, neg_count, selection, *args,
**kwargs):
"""Get a filtered subtraction between two ContactCounts"""
filtered_pos = pos_count.filter(selection)
filtered_neg = neg_count.filter(selection)
diff = collections.Counter(filtered_pos.counter)
diff.subtract(filtered_neg.counter)
return ContactCount(diff, *args, **kwargs)
class AtomMismatchedContactDifference(ContactDifference):
"""
Contact map comparison (only residues).
"""
_allow_mismatched_atoms = True
def most_common_atoms_for_contact(self, *args, **kwargs):
self._missing_atom_contacts()
def most_common_atoms_for_residue(self, *args, **kwargs):
self._missing_atom_contacts()
@property
def atom_contacts(self):
self._missing_atom_contacts()
@property
def haystack_residues(self):
self._missing_atom_contacts()
@property
def query_residues(self):
self._missing_atom_contacts()
def _missing_atom_contacts(self):
raise RuntimeError("Different atom indices involved between the two"
" maps, so this does not make sense.")
class ResidueMismatchedContactDifference(ContactDifference):
"""
Contact map comparison (only atoms).
"""
_allow_mismatched_residues = True
@property
def residue_contacts(self):
self._missing_residue_contacts()
@property
def _residue_ignore_atom_idxs(self):
self._missing_residue_contacts()
def most_common_atoms_for_contact(self, *args, **kwargs):
self._missing_residue_contacts()
def most_common_atoms_for_residue(self, *args, **kwargs):
self._missing_residue_contacts()
@property
def haystack_residues(self):
self._missing_residue_contacts()
@property
def query_residues(self):
self._missing_residue_contacts()
def _missing_residue_contacts(self):
raise RuntimeError("Different residue indices involved between the two"
" maps, so this does not make sense.")
class OverrideTopologyContactDifference(ContactDifference):
"""
Contact map comparison with a user provided Topology.
"""
def __init__(self, positive, negative, topology):
self._override_topology = topology
super().__init__(positive, negative)
| lgpl-2.1 |
brianlorenz/COSMOS_IMACS_Redshifts | Emission_Fitting/FindAvMedian.py | 1 | 2183 | #Computes the median Av (dust attenuation) in four stellar-mass bins and writes the medians to av_med_df.txt
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the error array
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Division function that avoids divide-by-zero (returns 0 where Y == 0)
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
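#Illustrative check of divz (values invented): entries where Y is zero come
#back as 0 instead of raising a divide-by-zero error, e.g.
#    divz(np.array([1.0, 2.0]), np.array([2.0, 0.0])) -> array([0.5, 0. ])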
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the muzzin data
mdata = ascii.read(mdatapath).to_pandas()
mdata = mdata.rename(columns={'ID':'OBJID'})
fluxdata = pd.merge(fluxdata,mdata)
#Location of the reddening data
reddata = '/Users/blorenz/COSMOS/COSMOSData/reddenings.txt'
#Read in the reddening data
red_df = ascii.read(reddata).to_pandas()
fluxdata = pd.merge(fluxdata,red_df,on='fluxfile')
fluxdata = fluxdata[fluxdata.av>0]
av_med_df = pd.DataFrame()
av_med_df.at[0,'mr1'] = np.median(fluxdata[fluxdata.LMASS<9.25].av)
av_med_df.at[0,'mr2'] = np.median(fluxdata[np.logical_and(fluxdata.LMASS>9.25,fluxdata.LMASS<9.5)].av)
av_med_df.at[0,'mr3'] = np.median(fluxdata[np.logical_and(fluxdata.LMASS>9.5,fluxdata.LMASS<9.75)].av)
av_med_df.at[0,'mr4'] = np.median(fluxdata[fluxdata.LMASS>9.75].av)
av_med_df.to_csv('/Users/blorenz/COSMOS/COSMOSData/av_med_df.txt',index=False)
| mit |
lol/BCI-BO-old | BCI_Framework/Learner_Manager.py | 1 | 8285 | from sklearn.metrics import zero_one_loss, mean_squared_error
import logging
from sklearn.preprocessing import StandardScaler
import numpy as np
import cPickle
import Learner_Factory
import Classifier_Parameter_Grid_Generator
from sklearn.utils import shuffle
from sklearn.cross_validation import StratifiedKFold, cross_val_score
import json
class Learner_Manager:
def __init__(self, config, learner_name, feature_name):
""" """
self.feature_name = feature_name
self.learner_name = learner_name
self.logging = logging
if config.configuration['logging_level_str'] == 'INFO':
self.logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
else:
self.logging.basicConfig(level=logging.NOTSET)
self.logging.info('started building a learner!')
self.config = config
self.my_learner_factory = Learner_Factory.Learner_Factory(config)
def train_learner(self, Xs, Y, X_test = [], Y_test = [], learner_params = [] ,optimal = False):
""" """
if optimal:
self.fit_opt_learner(Xs, Y, X_test, Y_test, learner_params)
else:
self.train_learner_cv(Xs, Y)
def scale_training_data(self, Xs):
scaled_Xs = []
for X in Xs:
X = np.asarray( X, dtype=np.float32, order='F')
scaler = StandardScaler()
X = scaler.fit_transform(X)
scaled_Xs.append(X)
return scaled_Xs
def scale_all_data(self, X, Y, X_test, Y_test):
''' '''
self.logging.info('Standardizing data!')
Y_test = np.array(Y_test)
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_test = scaler.transform(X_test)
self.logging.info('X size is: %s and Y size is: %s and X_test size is: %s and Y_test size is: %s',
'_'.join(map(str,X.shape)), str(len(Y)), '_'.join(map(str,X_test.shape)), str(len(Y_test)))
return X, Y, X_test, Y_test
def fit_opt_learner(self, X, Y, X_test, Y_test, learner_params):
''' '''
# clf.fit(X, Y)
X, Y, X_test, Y_test = self.scale_all_data(X[0], Y, X_test[0], Y_test)
learner = self.my_learner_factory.create_learner(self.learner_name)
learner.set_params_dict(learner_params)
learner.fit(X, Y)
Y_pred_train = learner.predict(X)
Y_pred = learner.predict(X_test)
nonnan_indices = ~np.isnan(Y_test)
error = learner.test_phase_loss(Y_test[nonnan_indices], Y_pred[nonnan_indices])
if type(error) == list:
for err in error:
self.logging.info('error is %s\n', str(err))
else:
self.logging.info('error is %s', str(error))
probs_train = learner.learner.predict_proba(X)
probs_test = learner.learner.predict_proba(X_test)
self.logging.info('Writing final results to file ')
np.savez(self.result_opt_path, error=error, Y_pred=Y_pred, Y_pred_train=Y_pred_train, probs_train=probs_train, probs_test = probs_test)
self.logging.info('Writing optimal classifier to file ')
# save the classifier
# with open(self.result_opt_classifier_path + '.pkl', 'wb') as fid:
# cPickle.dump(learner.learner, fid)
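    # The arrays written with np.savez above can later be reloaded by keyword
    # (sketch; ``saved_path`` is wherever the .npz file ended up):
    #
    #     res = np.load(saved_path)
    #     res['error'], res['Y_pred'], res['probs_train'], res['probs_test']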
@staticmethod
def find_cv_error(res_file_name):
""" """
params = {}
final_results_dict = json.load(open(res_file_name))
error = final_results_dict['error']
params = final_results_dict['error_params']
return error, params
# with open(res_file_name, 'r') as res_file:
# error = float(res_file.readline())
# params = eval(res_file.readline())
# return error, params
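    # Hedged sketch of the JSON layout find_cv_error expects; the key names
    # come from the code above, the values here are invented:
    #
    #     {"error": 0.25, "error_params": {"n_trees": 100}}
    #
    # json.load turns this into a dict and the method returns
    # (error, error_params).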
@staticmethod
def find_opt_error(opt_res_file_name):
""" """
# params = {}
with open(opt_res_file_name, 'r') as res_file:
error = float(res_file.readline())
# params = eval(res_file.readline())
return error
def train_learner_cv(self, Xs, Y, optimal = False):
assert self.result_path != ''
my_learner = self.my_learner_factory.create_learner(self.learner_name)
Y = np.asarray( Y, dtype=np.short, order='F')
scaled_Xs = self.scale_training_data(Xs)
if self.feature_name in (self.config.configuration['fe_params_dict']).keys():
feature_param_values = (self.config.configuration['fe_params_dict'])[self.feature_name]
feature_param_val_indices = dict(zip( feature_param_values, range(len(feature_param_values))))
param_grid, scores = my_learner.generate_param_grid(feature_param_values, self.learner_name)
else:
feature_param_values = None
param_grid, scores = my_learner.generate_param_grid(None, self.learner_name)
precision_scores, recall_scores = np.zeros(shape = scores.shape), np.zeros(shape = scores.shape)
for i in range(self.config.configuration["number_of_cvs_dict"][self.learner_name]):
for param_ind in range(len(scores)):
print param_grid[param_ind]
my_learner.set_params_list(param_grid[param_ind], i)
if feature_param_values is None:
X = scaled_Xs[0]
else:
X = scaled_Xs[feature_param_val_indices[param_grid[param_ind][-1]]]
X_new, Y_new = shuffle(X, Y, random_state = i)
cv = StratifiedKFold(y = Y_new, n_folds = self.config.configuration["number_of_cv_folds"])
if len(scores.shape) == 3:
                    # this is for ensemble learning methods (only RF)
cv_errors_sum = np.zeros(scores.shape[2])
precision_sum = np.zeros(scores.shape[2])
recall_sum = np.zeros(scores.shape[2])
elif len(scores.shape) == 2:
cv_errors_sum, precision_sum, recall_sum = 0, 0, 0
for train_index, test_index in cv:
# print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Y[train_index], Y[test_index]
cv_error_temp, precision_temp, recall_temp = my_learner.fit_calc_cv_scores(X_train, y_train, X_test, y_test)
cv_errors_sum += cv_error_temp
precision_sum += precision_temp
recall_sum += recall_temp
# my_learner.fit(X_train, y_train)
# test_predictions = my_learner.predict(X_test)
# cv_errors_sum += self.predict_forall_estimators(X_test, y_test)
crossval_error = cv_errors_sum/self.config.configuration["number_of_cv_folds"]
precision = precision_sum/self.config.configuration["number_of_cv_folds"]
recall = recall_sum/self.config.configuration["number_of_cv_folds"]
print 'error = ', crossval_error
if len(scores.shape) == 3:
scores[param_ind, i, :] = crossval_error
precision_scores[param_ind, i, :] = precision
recall_scores[param_ind, i, :] = recall
else:
scores[param_ind, i] = crossval_error
precision_scores[param_ind, i] = precision
recall_scores[param_ind, i] = recall
my_learner.write_cv_results_toFile(scores, precision_scores, recall_scores, param_grid, self.result_path)
def set_output_file_path(self, res_path):
self.result_path = res_path
def set_output_file_opt_path(self, res_path):
self.result_opt_path = res_path
def set_output_file_opt_classifier(self, res_path):
self.result_opt_classifier_path = res_path
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/io/tests/test_sql.py | 7 | 99211 | """SQL io tests
The SQL tests are broken down into different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas.types.common import (is_object_dtype, is_datetime64_dtype,
is_datetime64tz_dtype)
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types, PY36
from pandas.tseries.tools import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz',
'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
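# The SQL_STRINGS dict above is keyed first by statement name and then by
# flavor; the tests below pick the right statement with expressions such as
# SQL_STRINGS['create_iris'][self.flavor].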
class MixInBase(object):
def tearDown(self):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute(
"SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906,
-0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543,
0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26',
'1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query(
'SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A', ], ])
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res), 0)
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res2), 1)
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn)
self.assertTrue(
sql.has_table('test_frame1', self.conn),
'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn),
'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5', self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
        # Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(
Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a': [1 + 1j, 2j]})
# Complex data type should raise error
self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product(
[('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A', 'B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A', 'B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)
self.assertTrue('CREATE' in create_sql)
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test',
con=self.conn, dtype={'b': dtype})
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
def test_get_schema_keys(self):
frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})
create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
self.assertTrue(constraint_sentence in create_sql)
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
self.assertTrue(constraint_sentence in create_sql)
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
        # reading the query all at once
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=[u'\xe9', u'b'])
df.to_sql('test_unicode', self.conn, index=False)
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table(
'test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn,
index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
def test_warning_case_insensitive_table_name(self):
# see GH7815.
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
self.assertEqual(
len(w), 0, "Warning triggered for writing a table")
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes('test_index_saved')
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(
table.table.c['time'].type, sqltypes.DateTime))
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with tm.assertRaisesRegexp(ImportError, "pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={
'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-versicolor']))
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn,
params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-setosa']))
class _EngineToConnMixin(object):
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
def setUp(self):
super(_EngineToConnMixin, self).setUp()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
def tearDown(self):
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
super(_EngineToConnMixin, self).tearDown()
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
        # Test if the IO in the database still works if the connection is closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy",
conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_con_string_import_error(self):
if not SQLALCHEMY_INSTALLED:
conn = 'mysql://root@localhost/pandas_nosetest'
self.assertRaises(ImportError, sql.read_sql, "SELECT * FROM iris",
conn)
else:
raise nose.SkipTest('SQLAlchemy is installed')
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
        # without providing a connection object
        # (available for backwards compatibility)
create_sql = sql.get_schema(self.test_frame1, 'test')
self.assertTrue('CREATE' in create_sql)
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
self.assertEqual(self._get_sqlite_column_type(schema, 'time'),
"TIMESTAMP")
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@classmethod
def setUpClass(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
raise nose.SkipTest(msg)
def setUp(self):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
raise nose.SkipTest(
"Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
pandasSQL.drop_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64': [2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00'))
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))
elif is_datetime64tz_dtype(col.dtype):
self.assertTrue(str(col.dt.tz) == 'UTC')
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp(
'2000-01-01 08:00:00', tz='UTC'))
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp(
'2000-06-01 07:00:00', tz='UTC'))
else:
raise AssertionError("DateCol loaded with incorrect type "
"-> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, 'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
        # PostgreSQL server version difference
col = df.DateColWithTz
self.assertTrue(is_object_dtype(col.dtype) or
is_datetime64_dtype(col.dtype) or
is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}"
.format(col.dtype))
df = pd.read_sql_query("select * from types_test_data",
self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df, 'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
check(df.DateColWithTz)
df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
self.conn, chunksize=1)),
ignore_index=True)
col = df.DateColWithTz
self.assertTrue(is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}"
.format(col.dtype))
self.assertTrue(str(col.dt.tz) == 'UTC')
expected = sql.read_sql_table("types_test_data", self.conn)
tm.assert_series_equal(df.DateColWithTz,
expected.DateColWithTz
.astype('datetime64[ns, UTC]'))
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type, so values come back as strings
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type, so values come back as strings
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == 'sqlite':
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df,
check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.String))
self.assertEqual(sqltype.length, 10)
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables['single_dtype_test'].columns['A'].type
sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
self.assertTrue(isinstance(sqltypea, sqlalchemy.TEXT))
self.assertTrue(isinstance(sqltypeb, sqlalchemy.TEXT))
def test_notnull_dtype(self):
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32': Series([V, ], dtype='float32'),
'f64': Series([V, ], dtype='float64'),
'f64_as_f32': Series([V, ], dtype='float64'),
'i32': Series([5, ], dtype='int32'),
'i64': Series([5, ], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
self.assertEqual(np.round(df['f64'].iloc[0], 14),
np.round(res['f64'].iloc[0], 14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data',
con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(
'test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
raise nose.SkipTest(
"Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_bigint_warning(self):
        # test that no warning is raised for BIGINT columns (int64 support, GH7433)
df = DataFrame({'a': [1, 2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
            self.assertEqual(len(w), 0, "Warning triggered for BIGINT column")
class _TestMySQLAlchemy(object):
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql # noqa
cls.driver = 'pymysql'
except ImportError:
raise nose.SkipTest('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# MySQL has no real BOOL type (it's an alias for TINYINT)
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA = int column with NA values => becomes float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_read_procedure(self):
        # see GH7324. Although it is more of an API test, it is added to the
        # MySQL tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2 # noqa
cls.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest('psycopg2 not installed')
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in
        # mysql/sqlite)
df = DataFrame({'col1': [1, 2], 'col2': [
0.1, 0.2], 'col3': ['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table(
'test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='append')
res1 = sql.read_sql_table(
'test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_iris_data()
self._load_test1_data()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
'Table not written to DB')
self.pandasSQL.drop_table('drop_test_frame')
self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_time', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
if PY36:
raise nose.SkipTest("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
self.assertEqual(self._get_sqlite_column_type(
'dtype_test', 'B'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(
'dtype_test2', 'B'), 'STRING')
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype='STRING')
self.assertEqual(
self._get_sqlite_column_type('single_dtype_test', 'A'), 'STRING')
self.assertEqual(
self._get_sqlite_column_type('single_dtype_test', 'B'), 'STRING')
def test_notnull_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(
tbl, 'Date'), 'TIMESTAMP')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
self.assertRaises(ValueError, df.to_sql, "", self.conn)
for ndx, weird_name in enumerate(
['test_weird_name]', 'test_weird_name[',
'test_weird_name`', 'test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d' % ndx
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
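# Example of the substitution performed by format_query (illustrative only):
#
#     >>> format_query("INSERT INTO test VALUES (%s, %s, %s)", 1.5, None, "foo")
#     "INSERT INTO test VALUES (1.50000000, NULL, 'foo')"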
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
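# Usage sketch (illustrative values): tquery returns the raw rows as a list of
# tuples, e.g. tquery("SELECT count(*) FROM test", con=conn) -> [(3,)].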
def _skip_if_no_pymysql():
try:
import pymysql # noqa
except ImportError:
raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(SQLiteMixIn, tm.TestCase):
def setUp(self):
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY ("A", "B")' in create_sql)
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.to_sql(frame2, name='test_table2', con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords', index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=['c0'])
sql.to_sql(mono_df, con=self.conn, name='mono_df', index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum([my_c0[0]
for my_c0 in con_x.execute("select * from mono_df")])
        # it should not fail, and gives 3 (issue #3628)
self.assertEqual(the_sum, 3)
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn,
name=table_name, if_exists='fail')
self.assertRaises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
class TestSQLFlavorDeprecation(tm.TestCase):
"""
gh-13611: test that the 'flavor' parameter
is appropriately deprecated by checking the
functions that directly raise the warning
"""
con = 1234 # don't need real connection for this
funcs = ['SQLiteDatabase', 'pandasSQL_builder']
def test_unsupported_flavor(self):
msg = 'is not supported'
for func in self.funcs:
tm.assertRaisesRegexp(ValueError, msg, getattr(sql, func),
self.con, flavor='mysql')
def test_deprecated_flavor(self):
for func in self.funcs:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(sql, func)(self.con, flavor='sqlite')
@unittest.skip("gh-13611: there is no support for MySQL "
"if SQLAlchemy is not installed")
class TestXMySQL(MySQLMixIn, tm.TestCase):
@classmethod
def setUpClass(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setUp(self):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root',
passwd='', db='pandas_nosetest')
except:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_chunksize_read_type(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name='test', con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(sql=query, con=self.conn,
chunksize=chunksize, index_col="index")
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.ix[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name='test_table2',
con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
_skip_if_no_pymysql()
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords',
if_exists='replace', index=False)
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
self.assertRaises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
self.assertEqual(tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
notmatthancock/notmatthancock.github.io | code/py/roc-3.py | 1 | 1622 | import numpy as np
import matplotlib.pyplot as plt
rs = np.random.RandomState(1234)
p = 2
n = 1000
py1 = 0.6
mean1 = np.r_[1,1.]
mean0 = -mean1
# These are the parameters learned from before
w = np.r_[2.45641058, 1.55227045]
b = -0.824723538369
# Generate some testing data
Y = (rs.rand(n) > py1).astype(int)
X = np.zeros((n,p))
X[Y==0] = rs.multivariate_normal(mean0, np.eye(p), size=(Y==0).sum())
X[Y==1] = rs.multivariate_normal(mean1, np.eye(p), size=(Y==1).sum())
# This is the model's prediction on the test data.
T = 1 / (1. + np.exp(-b-np.dot(X,w)))
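# i.e. the logistic sigmoid sigma(b + w.x) -- the model's estimate of P(y=1 | x).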
thresholds = np.linspace(1,0,101)
ROC = np.zeros((101,2))
for i in range(101):
t = thresholds[i]
    # Classifier/label agreements and disagreements for the current threshold.
TP_t = np.logical_and( T > t, Y==1 ).sum()
TN_t = np.logical_and( T <=t, Y==0 ).sum()
FP_t = np.logical_and( T > t, Y==0 ).sum()
FN_t = np.logical_and( T <=t, Y==1 ).sum()
# Compute false positive rate for current threshold.
FPR_t = FP_t / float(FP_t + TN_t)
ROC[i,0] = FPR_t
# Compute true positive rate for current threshold.
TPR_t = TP_t / float(TP_t + FN_t)
ROC[i,1] = TPR_t
# Plot the ROC curve.
fig = plt.figure(figsize=(6,6))
plt.plot(ROC[:,0], ROC[:,1], lw=2)
plt.xlim(-0.1,1.1)
plt.ylim(-0.1,1.1)
plt.xlabel('$FPR(t)$')
plt.ylabel('$TPR(t)$')
plt.grid()
# Now let's compute the AUC score using the trapezoidal rule.
AUC = 0.
for i in range(100):
AUC += (ROC[i+1,0]-ROC[i,0]) * (ROC[i+1,1]+ROC[i,1])
AUC *= 0.5
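# Equivalent shortcut (illustrative): numpy's trapezoidal rule gives the same
# value up to floating-point error:
#     AUC_check = np.trapz(ROC[:, 1], ROC[:, 0])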
plt.title('ROC curve, AUC = %.4f'%AUC)
plt.show()
#plt.savefig('../../images/roc-3.png', bbox_inches='tight')
| mit |
m3wolf/xanespy | xanespy/sxstm.py | 1 | 3383 | # -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of Xanespy.
#
# Xanespy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xanespy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xanespy. If not, see <http://www.gnu.org/licenses/>.
"""Tools for importing Soft X-ray Scanning Tunneling Microscopy data
as output from the sector 4 ID-C beamline."""
import struct
import re
import numpy as np
import pandas as pd
class SxstmDataFile():
def __init__(self, filename):
self.filename = filename
self.file = open(self.filename, mode="r+b")
def header_lines(self):
self.file.seek(0)
lines = self.file.readlines()[0:34]
# Convert from bytestring to unicode
lines = [l.decode('utf-8') for l in lines]
# Remove excess whitespace
lines = [l.strip() for l in lines]
assert lines[-1] == ':HEADER_END:', lines[-1]
return lines[:-1]
def dataframe(self):
# Find the start of the data section
self.file.seek(0)
last_line = ""
while last_line != ":HEADER_END:":
last_line = self.file.readline().decode('ascii').strip()
# Load list of columns
        self.file.read(44)  # skip 44 bytes of unidentified header padding
channels = self.file.read(259)
channels = channels.split(b'\x00\x00\x00')
# Clean out unhelpful bytes from the column names
bad_chars = [0x0b, 0x0f, 0x12, 0x10, 0x08, 0x0c, 0x05, 0x1a]
clean_first = lambda b: b[1:] if b[0] in bad_chars else b
channels = list(map(clean_first, channels))
# Convert column names to unicode
channels = [c.decode('latin1') for c in channels]
# Read experimental data and convert to float/int/etc.
lines = self.file.read()
word_len = 4
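        # '>' = big-endian byte order, 'f' = 4-byte float: the file body is
        # unpacked as a flat stream of big-endian float32 values.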
fmt = ">%df" % (len(lines) / word_len)
numbers = struct.unpack_from(fmt, lines)
numbers = np.array(numbers)
# Reshape the values to be in the correct order for pandas
numbers = numbers.reshape((len(channels), -1))
numbers = numbers.swapaxes(0, 1)
# Create the pandas dataframe
df = pd.DataFrame(data=numbers, columns=channels)
return df
def channels(self):
hdr = self.header_lines()
reg = re.compile('^Channels="([a-zA-Z0-9 ();]+)"')
for l in hdr:
match = reg.match(l)
if match:
channels = match.group(1).split(';')
return channels
def num_points(self):
hdr = self.header_lines()
        reg = re.compile(r'^Points=(\d+)')
for l in hdr:
match = reg.match(l)
if match:
return int(match.group(1))
def close(self, *args, **kwargs):
self.file.close(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
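# Minimal usage sketch (illustrative; the filename is hypothetical):
#
#     with SxstmDataFile("sector4_scan_001.dat") as sxstm:
#         print(sxstm.channels(), sxstm.num_points())
#         df = sxstm.dataframe()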
| gpl-3.0 |
wlamond/scikit-learn | examples/decomposition/plot_image_denoising.py | 1 | 5957 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and
then various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
try:  # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
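# (each output pixel below is the mean of a 2x2 block of input pixels)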
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
efiring/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
bionet/ted.python | bionet/utils/video_io.py | 1 | 10403 | #!/usr/bin/env python
"""
Video I/O classes
=================
Classes for reading and writing video files into and from numpy [1]_
ndarrays using OpenCV [2]_ and
matplotlib [3]_.
- ReadVideo - Read frames from a video into ndarrays.
- WriteVideo - Write ndarrays as frames of a video.
- WriteFigureVideo - Write matplotlib figures as frames of a video.
- video_capture - Capture video data from a webcam.
.. [1] http://numpy.scipy.org/
.. [2] http://opencv.willowgarage.com/wiki/PythonInterface/
.. [3] http://matplotlib.sf.net/
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['ReadVideo', 'WriteVideo', 'WriteFigureVideo',
'video_capture']
import numpy as np
import cv
from numpy import floor
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import subprocess
import os
import tempfile
from time import time
from glob import glob
if not os.path.exists('/usr/bin/mencoder'):
raise RuntimeError('mencoder not found')
# See http://opencv.willowgarage.com/wiki/PythonInterface
# for more information on converting OpenCV image to ndarrays and
# vice versa.
class ReadVideo(object):
"""
Read frames from a video.
This class provides an interface for reading frames from a video
file using OpenCV and returning them as ndarrays.
Parameters
----------
filename : string
Name of input video file.
Methods
-------
get_frame_count()
Return the number of frames in the video.
get_frame_dims()
Return the dimensions of a frame in the video.
get_frame_height()
Return the height of a frame in the video.
get_frame_width()
Return the width of a frame in the video.
get_prop_fps()
Return the frame rate of the video.
read_cv_frame()
Read a frame from the video as an OpenCV frame.
read_np_frame()
Read a frame from the video as an ndarray.
"""
def __init__(self, filename):
self.capture = cv.CaptureFromFile(filename)
def get_frame_count(self):
"""Return the number of frames in the file."""
return int(cv.GetCaptureProperty(self.capture,
cv.CV_CAP_PROP_FRAME_COUNT))
def get_frame_width(self):
"""Return the width of a frame in the file in pixels."""
return int(cv.GetCaptureProperty(self.capture,
cv.CV_CAP_PROP_FRAME_WIDTH))
def get_frame_height(self):
"""Return the height of a frame in the file in pixels."""
return int(cv.GetCaptureProperty(self.capture,
cv.CV_CAP_PROP_FRAME_HEIGHT))
def get_frame_dims(self):
"""Return the dimensions of a frame in the file as
(height, width)."""
return self.get_frame_height(), self.get_frame_width()
def get_prop_fps(self):
"""Return the frame rate of the video file."""
return cv.GetCaptureProperty(self.capture,
cv.CV_CAP_PROP_FPS)
def __cv2array(self, img):
"""Convert an OpenCV image to an ndarray of dimensions
(height, width, channels)."""
depth2dtype = {
cv.IPL_DEPTH_8U: 'uint8',
cv.IPL_DEPTH_8S: 'int8',
cv.IPL_DEPTH_16U: 'uint16',
cv.IPL_DEPTH_16S: 'int16',
cv.IPL_DEPTH_32S: 'int32',
cv.IPL_DEPTH_32F: 'float32',
cv.IPL_DEPTH_64F: 'float64',
}
arrdtype = img.depth
a = np.fromstring(img.tostring(),
dtype=depth2dtype[img.depth],
count=img.width*img.height*img.nChannels)
# numpy assumes that the first dimension of an image is its
# height, i.e., the number of rows in the image:
a.shape = (img.height, img.width, img.nChannels)
return a
def __get_frame(self, n):
"""Retrieve the specified frame from the video."""
        if n is not None:
cv.SetCaptureProperty(self.capture,
cv.CV_CAP_PROP_POS_FRAMES, n)
return cv.QueryFrame(self.capture)
def read_np_frame(self, n=None):
"""Read a frame as a numpy array from the video."""
frame = self.__get_frame(n)
        if frame is not None:
return self.__cv2array(frame)
else:
return None
def read_cv_frame(self, n=None):
"""Read a frame as an OpenCV image from the video."""
frame = self.__get_frame(n)
return frame
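# Illustrative usage sketch (not part of the original module); 'movie.avi' is a
# hypothetical file name:
#
#     reader = ReadVideo('movie.avi')
#     reader.get_frame_count()          # number of frames in the file
#     frame = reader.read_np_frame(0)   # ndarray of shape (height, width, channels)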
class WriteVideo(object):
"""
Write frames to a video.
This class provides an interface for writing frames represented
as ndarrays to a video file using OpenCV.
Parameters
----------
filename : string
Name of output video file.
fourcc : tuple
Video codec of output video; default is ('M', 'J', 'P', 'G').
fps : float
Frame rate of output video; default is 30.0.
frame_size : tuple
Size of video frames (rows, columns); default is (256, 256).
is_color : bool
        True if the video is color.
Methods
-------
write_cv_frame(a)
Write an OpenCV frame `a` to the video.
write_np_frame(a)
Write the frame represented as ndarray `a` to the video.
"""
def __init__(self, filename, fourcc=('D', 'I', 'V', 'X'),
fps=30.0, frame_size=(256,256), is_color=True):
self.writer = cv.CreateVideoWriter(filename,
cv.CV_FOURCC(*fourcc), fps, frame_size, int(is_color))
def __array2cv(self, a):
"""Convert an ndarray to an OpenCV image."""
dtype2depth = {
'uint8': cv.IPL_DEPTH_8U,
'int8': cv.IPL_DEPTH_8S,
'uint16': cv.IPL_DEPTH_16U,
'int16': cv.IPL_DEPTH_16S,
'int32': cv.IPL_DEPTH_32S,
'float32': cv.IPL_DEPTH_32F,
'float64': cv.IPL_DEPTH_64F,
}
try:
nChannels = a.shape[2]
        except IndexError:
nChannels = 1
# numpy assumes that the first dimension of an image is its
# height, i.e., the number of rows in the image:
img = cv.CreateImageHeader((a.shape[1], a.shape[0]),
dtype2depth[str(a.dtype)],
nChannels)
cv.SetData(img, a.tostring(),
a.dtype.itemsize*nChannels*a.shape[1])
return img
def write_cv_frame(self, a):
"""Write an OpenCV image as a frame to the video."""
cv.WriteFrame(self.writer, a)
def write_np_frame(self, a):
"""Write a numpy array as a frame to the video."""
img = self.__array2cv(a)
cv.WriteFrame(self.writer, img)
class WriteFigureVideo(object):
"""
Write matplotlib figures as frames to a video.
This class provides an interface for writing frames represented as
matplotlib figures to a video file using mencoder.
Parameters
----------
filename : str
Output video file name.
dpi : float
Resolution at which to save each frame. This defaults to
the value assumed by `savefig`.
width : float
Frame width (in inches).
height : float
Frame height (in inches).
fps : float
Frames per second.
Methods
-------
write_fig(fig)
Write a matplotlib figure `fig` to the output video file.
create_video()
Create the output video file.
Notes
-----
This class is based upon the file movie_demo.py ((c) 2004 by Josh
    Lifton) included with matplotlib. The output video file is not
    actually assembled until the `create_video` method is called (this also
    happens automatically when the instance is deleted).
"""
def __init__(self, filename,
dpi=matplotlib.rcParams['savefig.dpi'], width=8.0,
height=6.0, fps=25):
self.filename = filename
self.dpi = dpi
        self.width = width
        self.height = height
self.fps = fps
self.tempdir = tempfile.mkdtemp()
self.frame_count = 0
def write_fig(self, fig):
"""Write a matplotlib figure to the output video file."""
        if self.tempdir is None:
raise ValueError('cannot add frames to completed video file')
if not isinstance(fig, Figure):
raise ValueError('can only write instances of type '
'matplotlib.figure.Figure')
if fig.get_figwidth() != self.width:
raise ValueError('figure width must be %f' % self.width)
if fig.get_figheight() != self.height:
raise ValueError('figure height must be %f' % self.height)
canvas = FigureCanvasAgg(fig)
canvas.print_figure(self.tempdir + str("/%010d.png" % self.frame_count),
self.dpi)
self.frame_count += 1
def create_video(self):
"""Assemble the output video from the input frames."""
width_pix = int(floor(self.width*self.dpi))
height_pix = int(floor(self.height*self.dpi))
command = ('mencoder', 'mf://'+self.tempdir+'/*.png',
'-mf', 'type=png:w=%d:h=%d:fps=%d' % (width_pix, height_pix,
self.fps),
'-ovc', 'lavc', '-lavcopts', 'vcodec=mpeg4',
'-oac', 'copy', '-o', self.filename)
subprocess.check_call(command)
for f in glob(self.tempdir + '/*.png'):
os.remove(f)
os.rmdir(self.tempdir)
self.tempdir = None
def __del__(self):
"""Create the video before the class instance is destroyed."""
if self.tempdir:
self.create_video()
def video_capture(filename, t, fourcc=('D','I','V','X'), fps=30.0,
frame_size=(640, 480), is_color=True):
"""Capture a video of time length `t` from a webcam using OpenCV
and save it in `filename` using the specified format."""
camera = cv.CaptureFromCAM(0)
w = WriteVideo(filename, fourcc, fps, frame_size, is_color)
start = time()
end = start + t
while time() < end:
frame = cv.QueryFrame(camera)
w.write_cv_frame(frame)
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/utils/extmath.py | 2 | 23356 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
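# Illustrative check (not part of the original module): dense and CSR inputs
# give the same row norms.
#
#     from scipy.sparse import csr_matrix
#     X = np.arange(6.).reshape(2, 3)
#     row_norms(X)                    # array([ 2.236...,  7.071...])
#     row_norms(csr_matrix(X))        # same values, computed without a temporary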
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)), but more robust.
    It returns -Inf if det(A) is non-positive or not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
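# Illustrative check (not part of the original module): fast_logdet matches
# np.log(np.linalg.det(A)) for a positive-definite A and degrades gracefully
# to -inf when the determinant is not positive.
#
#     fast_logdet(2. * np.eye(3))        # 3 * log(2) ~= 2.079
#     fast_logdet(np.diag([1., -1.]))    # -inf, since det is negative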
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
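# Illustrative usage sketch (not part of the original module): recover the top
# components of a low-rank matrix. Shapes follow the signature above.
#
#     rng = np.random.RandomState(0)
#     M = np.dot(rng.randn(100, 5), rng.randn(5, 80))   # rank-5 matrix
#     U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
#     U.shape, s.shape, V.shape                         # (100, 5), (5,), (5, 80)
#     np.allclose(np.dot(U * s, V), M)                  # True, up to numerical error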
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
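# Illustrative check (not part of the original module): flipping signs does not
# change the reconstruction, it only fixes an arbitrary sign convention.
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(6, 4)
#     U, s, V = linalg.svd(X, full_matrices=False)
#     U2, V2 = svd_flip(U.copy(), V.copy())
#     np.allclose(np.dot(U * s, V), np.dot(U2 * s, V2))   # True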
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
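# Illustrative check (not part of the original module): on moderate inputs the
# stable computation matches the naive formula, and it does not underflow to
# -inf for large negative values where the naive version does.
#
#     x = np.array([-3., 0., 3.])
#     np.allclose(log_logistic(x), np.log(1. / (1. + np.exp(-x))))   # True
#     log_logistic(np.array([-1000., 0.]))   # array([-1000., -0.693...]), no overflow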
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
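# Illustrative check (not part of the original module): rows sum to one even
# when the raw scores are large enough to overflow a naive np.exp.
#
#     X = np.array([[1000., 1001., 1002.],
#                   [-1., 0., 1.]])
#     P = softmax(X)
#     P.sum(axis=1)          # array([ 1.,  1.])
#     np.exp(X[0]).sum()     # inf -- the naive computation overflows (RuntimeWarning)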
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is the
    number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37,
    No. 3, pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
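# Illustrative check (not part of the original module): feeding two batches
# through the incremental update reproduces the batch statistics.
#
#     rng = np.random.RandomState(0)
#     A, B = rng.randn(30, 4), rng.randn(20, 4)
#     mean, var, n = _incremental_mean_and_var(A, 0., np.zeros(4), 0)
#     mean, var, n = _incremental_mean_and_var(B, mean, var, n)
#     np.allclose(mean, np.vstack((A, B)).mean(axis=0))   # True
#     np.allclose(var, np.vstack((A, B)).var(axis=0))     # True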
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
decvalts/landlab | landlab/components/fire_generator/examples/fire_test_driver.py | 1 | 1339 | """ fire_test_driver.py
This is a sample driver simply showing the histogram of fire recurrence
values drawn from the Weibull distribution.
This uses the default file in the generate_fire.py component. Because we are
drawing from a stochastic, statistical distribution, the output may change
after the component is reloaded.
The default file is currently set using parameters from central Colorado,
given by Cannon et al., (2008) and Moody and Martin (2001). This sample driver
should not be applied at other sites without careful consideration.
"""
from landlab.components.fire_generator.generate_fire import FireGenerator
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
# Initializing the FireGenerator() class from fire_generator.py
FG = FireGenerator()
FG.initialize()
# Finding unknown scale parameter given the mean fire recurrence value
FG.get_scale_parameter()
# Finding a time series based on the Weibull distribution
FG.generate_fire_time_series()
# Sorting the fire series to test the distribution...
FG.firelist.sort()
# Plotting the histogram to show the distribution.
plt.hist(FG.firelist)
plt.title('Weibull Distribution of Fire Recurrence Values')
plt.xlabel('Histogram of Fire Recurrence (years)', fontsize=14)
plt.ylabel('Frequency of Fire Recurrence Values', fontsize=14)
plt.show()
| mit |
tedunderwood/GenreProject | python/workshop/predictauthors.py | 1 | 4957 | import re, os
import csv
import random, pickle
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from bagofwords import WordVector, StandardizingVector
root = '/Users/tunder/Dropbox/GLNworkshop/USpoetry'
magazines = ['Bookman', 'Century', 'ContVerse', 'Crisis', 'Fugitive', 'Harpers', 'LittleReview', 'LyricWest', 'Masses', 'Messenger', 'Midland', 'Nation', 'NewRepublic', 'Opportunity', 'Others', 'Poetry', 'Scribners', 'SmartSet']
def clean_text(raw):
raw = re.sub(r'I\\.', '', raw)
raw = re.sub(r'\\]', '', raw)
raw = re.sub(r'[0-9]', '', raw)
raw = re.sub(r'II', '', raw)
raw = re.sub(r'[,.;:\"?!*()]', '', raw)
raw = re.sub(r'-', ' ', raw)
raw = raw.replace("\\'s", '')
raw = raw.replace("\\'S", '')
raw = raw.replace('\n', ' ')
raw = raw.lower()
words = raw.split()
return words
def get_magazine(pathlist):
magazine = []
for path in pathlist:
with open(path, encoding = 'utf-8') as f:
poem = f.read()
words = clean_text(poem)
magazine.extend(words)
return magazine
def paths_to_wordbags(pathlist):
wordbags = list()
for path in pathlist:
with open(path, encoding = 'utf-8') as f:
poem = f.read()
words = clean_text(poem)
wordbags.append(words)
return wordbags
def normalized_prediction(wordbags, samplesize, iterations, model, standardizer, featurelist):
''' Repeatedly samples a set of texts, represented as wordbags. In each case it
constructs a composite text from the sample, and treats it as a volume object
for a model to make a prediction about. Then it averages those predictions.
The prediction is 'normalized' in the sense that it's made about texts that are roughly
the same size. A further refinement could randomly sample n words, so the texts are literally
the same length.
'''
n = samplesize
if n > len(wordbags):
n = len(wordbags)
allpredictions = list()
lengths = list()
for i in range(iterations):
compositetext = list()
sampleofbags = random.sample(wordbags, n)
for bag in sampleofbags:
compositetext.extend(bag)
volume = WordVector(compositetext)
volume.selectfeatures(featurelist)
volume.normalizefrequencies()
volume.standardizefrequencies(standardizer)
data = pd.concat([volume.features], axis = 1)
data = data.T
this_prob = model.predict_proba(data)[0][1]
allpredictions.append(this_prob)
lengths.append(len(compositetext))
meanprediction = sum(allpredictions) / len(allpredictions)
meanlength = sum(lengths) / len(lengths)
return meanprediction, meanlength
with open('/Users/tunder/Dropbox/GenreProject/python/reception/model1919/standardizer.p', mode = 'rb') as f:
standardizer = pickle.load(f)
with open('/Users/tunder/Dropbox/GenreProject/python/reception/model1919/logisticmodel.p', mode = 'rb') as f:
model = pickle.load(f)
featurelist = standardizer.features
pathsbyauthor = dict()
# Let's create a dictionary of authors that points to
# paths for all their works.
for magazine in magazines:
filename = magazine + 'Data.csv'
metadatapath = os.path.join(root, filename)
with open(metadatapath, encoding = 'latin-1') as f:
reader = csv.DictReader(f)
for row in reader:
if 'AUTHOR' not in row:
continue
if 'FILENAME' not in row:
continue
if len(row['FILENAME']) > 1:
folder = os.path.join(root, magazine)
filepath = os.path.join(folder, row['FILENAME'])
author = row['AUTHOR']
if author in pathsbyauthor:
pathsbyauthor[author].append(filepath)
else:
pathsbyauthor[author] = [filepath]
# Now, for each author, characterize the probability that they'll be
# reviewed in one of the magazines on our list. Use ten samples
# of ten randomly selected texts for each author.
authortetrads = list()
for author, pathlist in pathsbyauthor.items():
if len(pathlist) < 11:
continue
wordbags = paths_to_wordbags(pathlist)
author_prob, meanlength = normalized_prediction(wordbags, 10, 10, model, standardizer, featurelist)
authortetrads.append((author_prob, author, len(wordbags), meanlength))
authortetrads.sort()
with open('authorprobs.csv', mode='w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['author', 'probofreview', 'poems', 'samplewords'])
for tetrad in authortetrads:
probability, author, numtexts, meanlength = tetrad
print()
avglen = round(meanlength/10)
print(author + ", with " + str(numtexts) + ' texts, and ' + str(avglen) + ' average length.')
print(probability)
writer.writerow([author, probability, numtexts, meanlength])
| mit |
zaxtax/scikit-learn | examples/linear_model/plot_ols.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_5/peaks_reader.py | 33 | 2761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
nathansilberman/models | transformer/example.py | 7 | 2114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
import matplotlib.pyplot as plt
# %% Create a batch of three images (1600 x 1200)
# %% Image retrieved from:
# %% https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('cat.jpg')
im = im / 255.
im = im.reshape(1, 1200, 1600, 3)
im = im.astype('float32')
# %% Let the output size of the transformer be half the image size.
out_size = (600, 800)
# %% Simulate batch
batch = np.append(im, im, axis=0)
batch = np.append(batch, im, axis=0)
num_batch = 3
x = tf.placeholder(tf.float32, [None, 1200, 1600, 3])
x = tf.cast(batch, 'float32')
# %% Create localisation network and convolutional layer
with tf.variable_scope('spatial_transformer_0'):
# %% Create a fully-connected layer with 6 output nodes
n_fc = 6
W_fc1 = tf.Variable(tf.zeros([1200 * 1600 * 3, n_fc]), name='W_fc1')
# %% Zoom into the image
initial = np.array([[0.5, 0, 0], [0, 0.5, 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')
h_fc1 = tf.matmul(tf.zeros([num_batch, 1200 * 1600 * 3]), W_fc1) + b_fc1
h_trans = transformer(x, h_fc1, out_size)
# %% Run session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
y = sess.run(h_trans, feed_dict={x: batch})
# plt.imshow(y[0])
| apache-2.0 |
valle212/ChartLibrary | presentations/revealpres/animlibx/src/dataprep/WriteDfToJsonVar.py | 1 | 2138 | def WriteDfToJsonVar(dff,path,orientation,varname="lineDataObject"):
"""
    Turn a pandas data frame into a JSON variable for an animlib path data object.
    Accepts data frames with the following structure:
- N + 1 columns: (x1,y1,...,yN) -> orientation="comx"
- N * 2 columns: (x1,y1),..., (xN,YN) -> orientation="sepx"
Columns need to be in order. Index is not to be used for storing values.
    Writes a named JSON object
    varname = {"series1": [[x,y]...,[x,y]], ..., "seriesN": [[x,y]...,[x,y]]}
    that can be used directly as an animlib path data object.
"""
import pandas as pd
class MyException(Exception):
pass
json = '{'
if orientation == 'sepx':
for colpairno in range(int(len(dff.columns)/2)):
colloc = colpairno * 2
df_p = dff.iloc[:,colloc:colloc+2].copy()
seriesname = df_p.columns[1]
json_p = df_p.to_json(orient="values")
if colpairno != len(dff.columns)/2-1:
json = json + '"'+str(seriesname)+'":' + json_p + ', '
else:
json = json + '"'+str(seriesname)+'":' + json_p + '}'
if orientation == 'comx':
for colloc in range(1,len(dff.columns)):
df_p = dff.iloc[:,[colloc]].copy()
seriesname = df_p.columns[0]
df_p['x'] = dff.iloc[:,0].copy()
df_p = df_p[['x',seriesname]]
json_p = df_p.to_json(orient="values")
if colloc != len(dff.columns)-1:
json = json + '"'+str(seriesname)+'":' + json_p + ', '
else:
json = json + '"'+str(seriesname)+'":' + json_p + '}'
json = "let " + varname + " = " + json
with open(path, 'w') as outfile:
outfile.write(json) | apache-2.0 |
heli522/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
joshbohde/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 1 | 5976 | # Author: Vlad Niculae
# License: BSD
import sys
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from .. import SparsePCA, MiniBatchSparsePCA, dict_learning_online
from ..sparse_pca import _update_code, _update_code_parallel
from ...utils import check_random_state
def generate_toy_data(n_atoms, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_atoms)
V = rng.randn(n_atoms, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_atoms):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
spca = SparsePCA(n_components=3, n_jobs=2, random_state=rng).fit(Y)
U2 = spca.transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
spca = SparsePCA(n_components=3, n_jobs=2, random_state=rng).fit(Y)
U2 = spca.transform(Y)
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_sparse_code():
rng = np.random.RandomState(0)
dictionary = rng.randn(10, 3)
real_code = np.zeros((3, 5))
real_code.ravel()[rng.randint(15, size=6)] = 1.0
Y = np.dot(dictionary, real_code)
est_code_1 = _update_code(dictionary, Y, alpha=1.0)
est_code_2 = _update_code_parallel(dictionary, Y, alpha=1.0)
assert_equal(est_code_1.shape, real_code.shape)
assert_equal(est_code_1, est_code_2)
assert_equal(est_code_1.nonzero(), real_code.nonzero())
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_equal(model.components_, V_init)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
dictionaryT, codeT = dict_learning_online(X.T, n_atoms=8, alpha=1,
random_state=rng)
assert_equal(codeT.shape, (8, 12))
assert_equal(dictionaryT.shape, (10, 8))
assert_equal(np.dot(codeT.T, dictionaryT.T).shape, X.shape)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=rng).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2,
random_state=rng).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2,
random_state=rng).fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd',
random_state=rng).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
TaxIPP-Life/til-core | til_core/data/utils/misc.py | 1 | 7173 | # -*- coding:utf-8 -*-
from __future__ import print_function
'''
Created on 2 août 2013
@author: a.eidelman
'''
import logging
import numpy as np
from pandas import Series, DataFrame
from numpy.lib.stride_tricks import as_strided
import pandas as pd
import pdb
log = logging.getLogger(__name__)
def recode(var_in, list_el, method, dtype=None):
'''
    Recode a variable based on another one.
    Be careful with the list and its order when using methods that rely on order comparisons.
'''
if dtype is None:
dtype1 = var_in.dtype
# dtype1 = var_in.max()
output = Series(index=var_in.index, dtype=dtype)
for el in list_el:
val_in = el[0]
val_out = el[1]
        if method == 'geq':
            output[var_in >= val_in] = val_out
        if method == 'eq':
            output[var_in == val_in] = val_out
        if method == 'leq':
            output[var_in <= val_in] = val_out
        if method == 'lth':
            output[var_in < val_in] = val_out
        if method == 'gth':
            output[var_in > val_in] = val_out
        if method == 'isin':
output[var_in.isin(val_in)] = val_out
return output
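# Illustrative usage sketch (not part of the original module): recode ages into
# 3 ordered groups; with 'geq' the thresholds are applied in list order, so
# later (higher) cutoffs overwrite earlier ones.
#
#     age = Series([5, 20, 40, 70])
#     recode(age, [(0, 1), (18, 2), (60, 3)], 'geq', dtype=np.int8)
#     # -> values 1, 2, 2, 3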
def index_repeated(nb_rep):
'''
    Number the replications. If [A,B,C] are replicated 3, 4 and 2 times, the function returns
    [0,1,2,0,1,2,3,0,1], which then makes it possible to build
    [[A,A,A,B,B,B,B,C,C],[0,1,2,0,1,2,3,0,1]] and to identify each replication.
'''
id_rep = np.arange(nb_rep.max())
id_rep = as_strided(id_rep, shape=nb_rep.shape + id_rep.shape, strides=(0,) + id_rep.strides)
return id_rep[id_rep < nb_rep[:, None]]
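# Doctest-style illustration (not in the original module), matching the
# [A, B, C] replicated 3, 4 and 2 times example from the docstring:
#
#     >>> index_repeated(np.array([3, 4, 2]))
#     array([0, 1, 2, 0, 1, 2, 3, 0, 1])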
def replicate(table):
columns_ini = table.columns
dtypes_ini = table.dtypes
nb_rep_table = np.asarray(table['nb_rep'], dtype=np.int64)
table_exp = np.asarray(table).repeat(nb_rep_table, axis=0)
table_exp = DataFrame(table_exp, columns = columns_ini, dtype = float)
    # restore the initial dtypes in spite of the round-trip through numpy
for dtype in [np.int64, np.int32, np.int16, np.int8, np.float32, np.float16, np.float64]:
var_type = dtypes_ini == dtype
modif_types = dtypes_ini[var_type].index.tolist()
table_exp[modif_types] = table_exp[modif_types].astype(dtype)
table_exp['id_rep'] = index_repeated(nb_rep_table)
table_exp['id_ini'] = table_exp['id']
table_exp['id'] = table_exp.index
return table_exp
def _MinType_col_int_pos(col):
'''
    Returns the smallest integer dtype able to hold a series of positive integers.
    Note the -2: two values are already taken, by 0 and by -1.
    That said, one power of two is dropped to account for negative values;
    it is not clear whether the encoding could be restricted to positive values only.
'''
if max(abs(col)) < 2 ** 7 - 2:
return col.astype(np.int8)
elif max(abs(col)) < 2 ** 15 - 2:
return col.astype(np.int16)
elif max(abs(col)) < 2 ** 31 - 2:
return col.astype(np.int32)
else:
return col.astype(np.int64)
def minimal_dtype(table):
'''
Try to give columns the minimal type using -1 for NaN value
Variables with only two non null value are put into boolean asserting NaN value as False
Minimal type for float is not searched (only integer)
When integer has positive and negative value, there is no obvious default value for NaN values so nothing is done.
'''
assert isinstance(table, pd.DataFrame)
modif = {'probleme': [], 'boolean': [], 'int_one_sign': [], 'other_int': [], 'float': [], 'object': []}
for colname in table.columns:
col = table[colname]
if len(col.value_counts()) <= 1:
            # TODO: currently buggy if the initial value was -1
col = col.fillna(value=-1)
modif['probleme'].append(colname)
if col.dtype == 'O':
            # log.info(colname, " is an object, with a good dictionary, we could transform it into integer")
modif['object'].append(colname)
if col.dtype != 'O':
if len(col.value_counts()) == 2:
min = col.min()
col = col.fillna(value=min)
col = col - int(min)
# modif['boolean'].append(colname)
# table[colname] = col.astype(np.bool)
table[colname] = col.astype(np.int8)
else:
try:
if (col[col.notnull()].astype(int) == col[col.notnull()]).all():
try:
col.loc[col.notnull()] = col[col.notnull()].astype(int).values
except:
                            # in this case, col is already an int, and even smaller than int32
pass
                        if col.min() >= 0 or col.max() <= 0:  # values all have the same sign
sign = 1 - 2 * (max(col) < 0)
col = col.fillna(value = -1 * sign)
modif['int_one_sign'].append(colname)
table[colname] = _MinType_col_int_pos(col)
else:
modif['other_int'].append(colname)
else:
if (col.isnull().any()):
modif['float'].append(colname)
else:
# TODO
modif['float'].append(colname)
except:
pdb.set_trace()
if modif['object']:
log.info('Object type columns have not been modified : \n {}'.format(modif['object']))
if modif['float']:
log.info('Float type columns have not been modified : \n {}'.format(modif['float']))
if modif['other_int']:
log.info('Integer type columns with positive AND negative values have not been modified : \n {}'.format(
modif['other_int']))
if modif['probleme']:
log.info('There is no much distinct values for following variables : \n {}'.format(modif['probleme']))
if modif['boolean']:
log.info('Note that these columns are transformed into boolean : \n {}'.format(modif['boolean']))
log.info('Note also that in these cases, missing value are set to False')
if modif['int_one_sign']:
log.info('Dtype have been also optimized for : \n {}'.format(modif['int_one_sign']))
log.info('Missing values were set to -1 (or +1 when only negative values)')
return table
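# Illustrative sketch (hypothetical frame, not in the original module): an
# integer-valued float column with NaN is downcast and its missing values are
# encoded as -1, while a genuinely fractional column with NaN stays float.
#
#     >>> df = pd.DataFrame({'a': [1., 2., 3., np.nan], 'b': [0.5, 1.5, 2.5, np.nan]})
#     >>> minimal_dtype(df).dtypes
#     a       int8
#     b    float64
#     dtype: object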
def count_dup(data, var):
counts = data.groupby(var).size()
df2 = pd.DataFrame(counts, columns = ['size'])
    var_rep = df2[df2['size'] > 1]
if len(var_rep) != 0:
        print("Number of values appearing more than once: " + str(len(var_rep)))
return len(var_rep)
def drop_consecutive_row(data, var_dup):
'''
Remove a row if it's the same than the previous one for all
variables in var_dup
'''
to_drop = False
for var in var_dup:
to_drop = to_drop | (data[var].shift(1) != data[var])
data['block'] = (to_drop).astype(int).cumsum()
data = data.drop_duplicates('block')
data = data.drop('block', axis = 1)
return data
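# Illustrative sketch (not part of the original module): only the first row of
# each consecutive run of identical (x, y) pairs is kept.
#
#     >>> d = pd.DataFrame({'x': [1, 1, 2, 2, 1], 'y': list('aabba')})
#     >>> drop_consecutive_row(d, ['x', 'y'])['x'].tolist()
#     [1, 2, 1]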
| gpl-3.0 |
santosfamilyfoundation/TrafficCloud | app/traffic_cloud_utils/plotting/thinkplot.py | 2 | 20031 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#f7fbff', '#deebf7', '#c6dbef',
'#9ecae1', '#6baed6', '#4292c6',
'#2171b5','#08519c','#08306b'][::-1]
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
]
current_figure = None
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, num):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[num]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in _Brewer.')
@classmethod
def InitIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls, num):
"""Gets the color iterator."""
fig = pyplot.gcf()
if fig != cls.current_figure:
cls.InitIter(num)
cls.current_figure = fig
if cls.color_iter is None:
cls.InitIter(num)
return cls.color_iter
def _UnderrideColor(options):
"""If color is not in the options, chooses a color.
"""
if 'color' in options:
return options
# get the current color iterator; if there is none, init one
color_iter = _Brewer.GetIter(5)
try:
options['color'] = next(color_iter)
except StopIteration:
# if you run out of colors, initialize the color iterator
# and try again
warnings.warn('Ran out of colors. Starting over.')
_Brewer.ClearIter()
_UnderrideColor(options)
return options
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
_Brewer.InitIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (12, 6),
(1, 3): (12, 6),
(1, 4): (12, 5),
(1, 5): (12, 4),
(2, 2): (10, 10),
(2, 3): (16, 10),
(3, 1): (8, 10),
(4, 1): (8, 12),
}
if (rows, cols) in size_map:
fig = pyplot.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
ax = pyplot.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
else:
ax = pyplot.gca()
return ax
def SubPlot(plot_number, rows=None, cols=None, **options):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
options: passed to subplot
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
return pyplot.subplot(rows, cols, plot_number, **options)
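# Minimal usage sketch for the PrePlot/SubPlot pair (illustrative only, not part
# of the original module): announce how many lines and panels are coming, plot
# into the first panel, switch panels, then hand the axis labels to Show.
#
#     import thinkplot
#     thinkplot.PrePlot(num=2, rows=1, cols=2)
#     thinkplot.Plot([1, 2, 3], [1, 4, 9], label='quadratic')
#     thinkplot.SubPlot(2)
#     thinkplot.Plot([1, 2, 3], [1, 2, 3], label='linear')
#     thinkplot.Show(xlabel='x', ylabel='y')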
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
global LOC
LOC = None
_Brewer.ClearIter()
pyplot.clf()
fig = pyplot.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
_Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.7, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def Vlines(xs, y1, y2, **options):
"""Plots a set of vertical lines.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
options: keyword args passed to pyplot.vlines
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
pyplot.vlines(xs, y1, y2, **options)
def Hlines(ys, x1, x2, **options):
"""Plots a set of horizontal lines.
Args:
ys: sequence of y values
x1: sequence of x values
x2: sequence of x values
options: keyword args passed to pyplot.vlines
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
pyplot.hlines(ys, x1, x2, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Fills the space between two lines.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
# see if the values support arithmetic
try:
xs[0] - xs[0]
except TypeError:
# if not, replace values with numbers
labels = [str(x) for x in xs]
xs = np.arange(len(xs))
pyplot.xticks(xs+0.5, labels)
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
warnings.warn("Hist: Can't compute bar width automatically."
"Check for non-numeric types in Hist."
"Or try providing width option."
)
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
warnings.warn("Pmf: Can't compute bar width automatically."
"Check for non-numeric types in Pmf."
"Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
        xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
options = _Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
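# Usage sketch for the transform/scale hand-off (illustrative; assumes a Cdf-like
# object with Render() and a label attribute, e.g. from the companion thinkstats2
# module):
#
#     scale = Cdf(cdf, transform='pareto')   # complementary CDF on log-log axes
#     Show(xlabel='x', ylabel='CCDF', **scale)
#
# The returned dictionary carries the xscale/yscale implied by the transform, so
# passing it on to Config or Show keeps the axes consistent with the plot.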
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
global LEGEND
LEGEND = options.get('legend', LEGEND)
if LEGEND:
global LOC
LOC = options.get('loc', LOC)
pyplot.legend(loc=LOC)
# x and y ticklabels can be made invisible
val = options.get('xticklabels', None)
if val is not None:
if val == 'invisible':
ax = pyplot.gca()
labels = ax.get_xticklabels()
pyplot.setp(labels, visible=False)
val = options.get('yticklabels', None)
if val is not None:
if val == 'invisible':
ax = pyplot.gca()
labels = ax.get_yticklabels()
pyplot.setp(labels, visible=False)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(pyplot.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
save_options = {}
for option in ['bbox_inches', 'pad_inches']:
if option in options:
save_options[option] = options.pop(option)
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt, **save_options)
if clf:
Clf()
def SaveFormat(root, fmt='eps', **options):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
_Underride(options, dpi=300)
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, **options)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
vlines = Vlines
hlines = Hlines
fill_between = FillBetween
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = _Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| mit |
YouCantCodeWithUs/KMCIslandGrowth | 2DGrowth.py | 1 | 12728 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sys
import random
import numpy as np
from multiprocessing import Pool
from matplotlib import pyplot as plt
import pprint
import Constants as gv
import Periodic
import Bins
import Energy
pool = Pool(1)
def InitSubstrate():
'''
Initialize substrate atom positions in a hexagonal grid below y = 0
w: Int - number of atoms in a horizontal row
h: Int - number of rows
r_s: Float - spacing between atoms
returns: np.array(np.array[x, y]) - list of substrate atom positions
'''
R = []
roffset = gv.r_s/2
for i in range(gv.Hs):
y = -i*gv.r_s*gv.sqrt3/2
for x in range(gv.W):
Ri = x*gv.r_s
if i%2 == 1:
Ri += roffset
R.append(Ri); R.append(y)
return Periodic.PutAllInBox(np.array(R))
def DepositionRate():
'''
Returns a calculated value based on estimated wafer size and flux rate.
'''
return 6.0*18/gv.W*gv.deposition_rate
    # return float(gv.W)*MLps  # unreachable alternative; MLps is not defined in this module
def SurfaceAtoms(adatoms, substrate_bins):
'''
Identifies surface adatoms.
Surface atoms are defined as adatoms coordinated by < 5 other atoms.
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
returns: List[np.array[x, y]] - list of positions of surface adatoms.
'''
surfaceAtoms = []
(nearest_adatoms, nearest_substrate) = Bins.NearestNeighbors(adatoms, substrate_bins)
for i in range(len(adatoms)/2):
if len(nearest_adatoms[i])/2 + len(nearest_substrate[i])/2 < 6:
surfaceAtoms.append(adatoms[2*i])
surfaceAtoms.append(adatoms[2*i+1])
return np.array(surfaceAtoms)
def DepositAdatom(adatoms, substrate_bins):
'''
Deposits new adatom onto the surface, then performs a relaxation.
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
returns: np.array(np.array[x, y]) - position of adatoms with additional adatom
'''
min_y = 0
if len(adatoms) > 0:
min_y = min([adatoms[2*i+1] for i in range(len(adatoms)/2)])
new_x = Periodic.PutInBox(random.random()*gv.L)
new_y = 2*gv.r_as + min_y
adatom_bins = Bins.PutInBins(adatoms)
closeby = False
while not closeby:
Ri = np.array([new_x, new_y])
nearby_adatoms = Bins.NearbyAtoms(new_x, new_y, adatom_bins)
d = Periodic.Distances(Ri, nearby_adatoms)[0]
if len(d) > 0 and min(d) < 2*gv.r_a:
closeby = True
nearby_substrate = Bins.NearbyAtoms(new_x, new_y, substrate_bins)
d = Periodic.Distances(Ri, nearby_substrate)[0]
if len(d) > 0 and min(d) < 2*gv.r_as:
closeby = True
if not closeby:
new_y -= gv.r_a/2
adatoms = np.append(adatoms, Ri)
adatoms = Relaxation(adatoms, substrate_bins)
return adatoms
def Relaxation(adatoms, substrate_bins, scale=16, threshold=1e-4):
'''
Relaxes the deposited adatoms into the lowest energy position using a conjugate gradient algorithm.
Runs recursively if the step size is too small.
Uses a multiprocessing pool for extra speed.
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
scale: int - How much to scale the step size down by
returns: np.array(np.array[x, y]) - position of adatoms with additional adatom
'''
global pool
if scale > 1024:
print 'scale exceeded, reducing threshold'
return Relaxation(adatoms, substrate_bins, scale=16, threshold=threshold*10)
# If the energy of a proposed relaxed arrangement exceeds this Ui, then halve the stepsize and start over.
Ui = Energy.TotalEnergy(adatoms, substrate_bins)
N = len(adatoms)/2
xn = adatoms.copy()
# Initial forces on each atom
dx0 = Energy.AdatomAdatomForces(xn) + Energy.AdatomSubstrateForces(xn, substrate_bins)
xs = [(adatoms + a*dx0/20/scale, substrate_bins) for a in range(20)]
ys = pool.map(Energy.RelaxEnergy, xs)
(xnp, a) = xs[ys.index(min(ys))]
sn = dx0[:]
snp = dx0[:]
# Force on each atom in the lowest energy position
dxnp = Energy.AdatomAdatomForces(xnp) + Energy.AdatomSubstrateForces(xnp, substrate_bins)
maxF = max([np.dot(dxnp[2*i:2*i+2], dxnp[2*i:2*i+2]) for i in range(len(dxnp))])
lastmaxF = maxF
while maxF > threshold:
# U = Energy.TotalEnergy(adatoms, substrate_bins)
# if U > Ui:
# print 'changing scale', scale*2, maxF
# return Relaxation(adatoms, substrate_bins, scale=scale*2, threshold=threshold)
max_y = min([xnp[2*i+1] for i in range(len(xnp)/2)])
if max_y > gv.Dmax:
print 'atom out of bounds', scale*2
return Relaxation(adatoms, substrate_bins, scale=scale*2, threshold=threshold)
# Calculate the conjugate direction step size, beta
top = np.dot(xnp, (xnp - xn))
bot = np.dot(xn, xn)
beta = top/bot
# Update conjugate direction
snp = dxnp + beta*sn
xn = xnp.copy()
sn = snp.copy()
xs = [(xn + a*sn/20/scale, substrate_bins) for a in range(20)]
ys = pool.map(Energy.RelaxEnergy, xs)
# plt.plot(ys); plt.show()
(xmin, a) = xs[ys.index(min(ys))]
xnp = xmin.copy()
# Calculate forces at new lowest energy position
dxnp = Energy.AdatomAdatomForces(xmin) + Energy.AdatomSubstrateForces(xmin, substrate_bins)
maxF = max([np.dot(dxnp[2*i:2*i+2], dxnp[2*i:2*i+2]) for i in range(len(dxnp))])
print maxF
if abs(lastmaxF - maxF) < 1e-8:
print 'relaxation stalled, changing scale', scale*2, maxF
return Relaxation(adatoms, substrate_bins, scale=scale*2, threshold=threshold)
lastmaxF = maxF
return Periodic.PutAllInBox(xnp)
def LocalRelaxation(adatoms, substrate_bins, around):
'''
Relaxes the deposited adatoms into the lowest energy position using a conjugate gradient algorithm.
Performs an additional global relaxation is global forces are too large.
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
around: np.array[x, y] - position around which to perform the relaxation
returns: np.array(np.array[x, y]) - positions of relaxed adatoms
'''
nearby_indices = []
relaxing_adatoms = []
adatom_bins = Bins.PutInBins(adatoms)
nearby_adatoms = Bins.NearbyAtoms(around[0], around[1], adatom_bins)
nearby_indices = [list(Ds).index(0) for Ds in Periodic.Distances(nearby_adatoms, adatoms)]
nearby_indices = sorted(nearby_indices, reverse=True)
for i in nearby_indices:
relaxing_adatoms.append(adatoms[2*i])
relaxing_adatoms.append(adatoms[2*i+1])
adatoms = np.delete(adatoms, 2*i)
adatoms = np.delete(adatoms, 2*i)
relaxing_adatoms = np.array(relaxing_adatoms)
relaxing_adatoms = Relaxation(relaxing_adatoms, substrate_bins, scale=4)
adatoms = np.append(adatoms, relaxing_adatoms)
forces = Energy.AdatomAdatomForces(adatoms) + Energy.AdatomSubstrateForces(adatoms, substrate_bins)
maxF = max([np.dot(forces[2*i:2*i+2], forces[2*i:2*i+2]) for i in range(len(forces))])
if maxF > 1e-3:
print 'global required'
adatoms = Relaxation(adatoms, substrate_bins, scale=4, threshold=1e-4)
return adatoms
def HoppingRates(adatoms, substrate_bins):
'''
Calculates the hopping rate of each surface adatom.
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
returns: list(float) - hopping rate of each surface atom
'''
omega = 1.0e12
surf = SurfaceAtoms(adatoms, substrate_bins)
if len(surf) < 1:
return []
surf_indices = [list(Ds).index(0) for Ds in Periodic.Distances(surf, adatoms)]
return [omega*np.exp(Energy.DeltaU(i, adatoms, substrate_bins)*gv.beta) for i in surf_indices]
def HoppingPartialSums(Rk):
'''
Cumulative sum hopping rates. Used to determine whether to hop or deposit.
'''
return [0] + [sum(Rk[:i+1]) for i in range(len(Rk))]
def TotalRate(Rd, Rk):
'''
Addition!
'''
return Rd + sum(Rk)
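# Sketch of the kinetic Monte Carlo event selection used in the main loop at the
# bottom of this file (illustrative only; it mirrors, not replaces, that code):
#
#     Rk = HoppingRates(adatoms, substrate_bins)
#     pj = HoppingPartialSums(Rk)          # [0, R1, R1+R2, ...]
#     Rtot = TotalRate(DepositionRate(), Rk)
#     r = random.random()*Rtot
#     if [p for p in pj if p > r]:         # r fell inside the cumulative hop rates
#         ...hop the selected surface atom...
#     else:                                # remaining slice of width Rd
#         ...deposit a new adatom...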
def HopAtom(i, adatoms, substrate_bins):
'''
Moves a surface adatom and to a nearby spot. Performs a local relaxation around the new position.
i: int - index of adatom to hop
adatoms: np.array(np.array[x, y]) - position of adatoms
substrate_bins: list(list(np.array[x, y])) - bin'd position of substrate atoms
returns: np.array(np.array[x, y]) - positions of relaxed adatoms after hop
'''
surf = SurfaceAtoms(adatoms, substrate_bins)
jumper = np.array(adatoms[2*i:2*i+2])
adatoms = np.delete(adatoms, 2*i); adatoms = np.delete(adatoms, 2*i)
Ds = Periodic.Distances(jumper, surf)[0]
potential_jump_to = []
for i in range(len(Ds)):
if Ds[i] < 3*gv.r_a:
potential_jump_to.append(surf[2*i:2*i+2])
jump_to_surf = potential_jump_to[random.randint(0, len(potential_jump_to)-1)]
jump_directions = [i*np.pi/3 for i in range(6)]
jump_vectors = [np.array([np.cos(t), np.sin(t)])*gv.r_a for t in jump_directions]
jump_positions = [jump_to_surf + v for v in jump_vectors]
jump_energies = [Energy.HoppingEnergy(p, adatoms, substrate_bins) for p in jump_positions]
jump_to_pos = jump_positions[jump_energies.index(min(jump_energies))]
jump_to_pos = Periodic.PutAllInBox(jump_to_pos)
adatoms = np.append(adatoms, jump_to_pos)
adatoms = Relaxation(adatoms, substrate_bins)
return adatoms
def PlotSubstrate(substrate, color='blue'):
'''
Pretty plots.
'''
PlotAtoms(substrate, color)
for x in range(gv.nbins_x + 2):
plt.plot([x*gv.bin_size - gv.L/2, x*gv.bin_size - gv.L/2], [gv.Dmin, gv.nbins_y*gv.bin_size + gv.Dmin], color='red')
for y in range(gv.nbins_y + 1):
plt.plot([-gv.L/2, (gv.nbins_x + 1)*gv.bin_size - gv.L/2], [y*gv.bin_size + gv.Dmin, y*gv.bin_size + gv.Dmin], color='red')
# plt.show()
def PlotAtoms(atoms, color='blue'):
'''
Pretty plots.
'''
for i in range(len(atoms)/2):
plt.scatter(atoms[2*i], atoms[2*i+1], color=color)
def PlotEnergy(adatoms, substrate_bins):
'''
Pretty plots.
'''
ymin = min(a[1] for a in adatoms) + gv.r_a
xs = np.linspace(-gv.L/2, gv.L/2, 500)
pos = [(np.array([x, ymin]), adatoms, substrate_bins) for x in xs]
Es = pool.map(Energy.RelaxEnergy, pos)
plt.plot(xs, Es)
plt.axis([min(xs), max(xs), min(Es)-0.1, max(Es)+0.1])
# plt.show()
plt.savefig(gv.folder + 'E%.2i.png'%t)
plt.clf()
def PlotStresses(adatoms, substrate, substrate_bins):
PlotAtoms(substrate, 'blue')
Fs = Energy.AdatomAdatomForces(adatoms) + Energy.AdatomSubstrateForces(adatoms, substrate_bins)
Fs = [np.sqrt(Fs[2*i]**2 + Fs[2*i+1]**2) for i in range(len(Fs)/2)]
Fs = [np.log(f*1e4) for f in Fs]
Fs = ['#e7%.2x%.2x'%(max(0, min(255, f*8+79)), max(0, min(255, f*8+53))) for f in Fs]
for i in range(len(Fs)):
PlotAtoms(adatoms[2*i:2*i+2], Fs[i])
plt.axis([-gv.L/2, gv.L/2, gv.Dmin, gv.Dmax])
def average(l):
return sum(l)/len(l)
substrate = InitSubstrate()
substrate_bins = Bins.PutInBins(substrate)
surf_substrate = SurfaceAtoms(substrate, substrate_bins)
adatoms = np.array([])
# adatoms = InitSubstrate()[:gv.W*4]
# for i in range(len(adatoms)/2):
# adatoms[2*i] -= gv.r_a/2
# adatoms[2*i+1] += gv.r_a*gv.sqrt3
# adatoms = Relaxation(adatoms, substrate_bins)
t = 0
while True:
Rk = HoppingRates(adatoms, substrate_bins)
pj = HoppingPartialSums(Rk)
Rd = DepositionRate()
Rtot = TotalRate(Rd, Rk)
r = random.random()*Rtot
print t, pj[-1], Rd, r
HoppingAtom = [p for p in pj if p > r]
if len(HoppingAtom) > 0:
print 'HOP'
adatoms = HopAtom(pj.index(HoppingAtom[0])-1, adatoms, substrate_bins)
else:
adatoms = DepositAdatom(adatoms, substrate_bins)
# if i % 5 == 0:
PlotSubstrate(substrate, 'blue')
PlotAtoms(adatoms, 'green')
surf = SurfaceAtoms(adatoms, substrate_bins)
PlotAtoms(surf, 'red')
plt.axis([-gv.L/2-0.1, gv.L/2+0.1, gv.Dmin-0.1, gv.Dmax+3*gv.r_as])
plt.savefig(gv.folder + '%.2i.png'%t)
# plt.show()
plt.clf()
PlotStresses(adatoms, substrate, substrate_bins)
plt.savefig(gv.folder + 'F%.2i.png'%t)
# PlotEnergy(adatoms, substrate_bins)
t += 1
# minimum energy position
# xs = np.linspace(0, gv.L, 500)
# Es = [Energy.AdatomSurfaceEnergy(np.array([x, gv.r_as]), substrate_bins, gv.E_as, gv.r_as) for x in xs]
# xmin = xs[Es.index(min(Es))]
# xs = np.linspace(xmin-0.1, xmin+0.1, 500)
# Es = [Energy.AdatomSurfaceEnergy(np.array([x, gv.r_as]), substrate_bins, gv.E_as, gv.r_as) for x in xs]
# xmin = xs[Es.index(min(Es))]
# ys = np.linspace(gv.r_as/2, gv.r_as*2, 500)
# Es = [Energy.AdatomSurfaceEnergy(np.array([xmin, y]), substrate_bins, gv.E_as, gv.r_as) for y in ys]
# ymin = ys[Es.index(min(Es))]
# ys = np.linspace(ymin-0.1, ymin+0.1, 500)
# Es = [Energy.AdatomSurfaceEnergy(np.array([xmin, y]), substrate_bins, gv.E_as, gv.r_as) for y in ys]
# ymin = ys[Es.index(min(Es))]
# print xmin/gv.r_as, ymin/gv.r_as
# print Energy.AdatomSurfaceEnergy(np.array([xmin, ymin]), substrate_bins, gv.E_as, gv.r_as)
# print Energy.AdatomSurfaceForce(np.array([xmin, ymin]), substrate_bins, gv.E_as, gv.r_as)
# plt.plot(ys, Es)
# plt.show() | mit |
ifp-uiuc/ifp_toolbox | ifp_toolbox/general/__init__.py | 1 | 1682 | import numpy
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class RectangleSelector(object):
"""
This is a class that allows the user to interactively select a
rectangular region in an image and returns the coordinates of the
upper-left corner and the bottom-right corner.
Code snippet taken from:
http://stackoverflow.com/questions/12052379/
matplotlib-draw-a-selection-area-in-the-shape-of-a-rectangle-with-the-mouse
"""
def __init__(self):
self.ax = plt.gca()
self.rect = Rectangle((0, 0), 1, 1)
self.customize_rectangle()
self.x0 = None
self.y0 = None
self.x1 = None
self.y1 = None
self.ax.add_patch(self.rect)
self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.ax.figure.canvas.mpl_connect('button_release_event',
self.on_release)
def customize_rectangle(self):
self.rect.set_fill(False)
self.rect.set_edgecolor('blue')
self.rect.set_linewidth(3.0)
def on_press(self, event):
print 'press'
self.x0 = event.xdata
self.y0 = event.ydata
def on_release(self, event):
print 'release'
self.x1 = event.xdata
self.y1 = event.ydata
self.rect.set_width(self.x1 - self.x0)
self.rect.set_height(self.y1 - self.y0)
self.rect.set_xy((self.x0, self.y0))
self.ax.figure.canvas.draw()
def get_coords(self):
return [numpy.floor(self.x0),
numpy.floor(self.y0),
numpy.ceil(self.x1),
numpy.ceil(self.y1)]
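# Illustrative usage sketch (not part of the original module; `some_image` is a
# placeholder for any 2-D array you have already loaded):
#
#     import matplotlib.pyplot as plt
#     from ifp_toolbox.general import RectangleSelector
#
#     plt.imshow(some_image)
#     selector = RectangleSelector()   # attaches to the current axes
#     plt.show()                       # click and drag, then close the window
#     x0, y0, x1, y1 = selector.get_coords()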
| bsd-3-clause |
joshwalawender/RasPiProjects | Kegerator.py | 1 | 19736 | #!/usr/env/python
from __future__ import division, print_function
## Import General Tools
import sys
import os
import argparse
import logging
import numpy as np
import datetime
import math
import RPi.GPIO as GPIO
import DHT22
import DS18B20
import Carriots
import humidity
import astropy.io.ascii as ascii
import astropy.table as table
temp_high = 42.0
temp_low = 38.0
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def main(args):
# temp_high = 42.0
# temp_low = 38.0
status = 'unknown'
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT)
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
logger = logging.getLogger('MyLogger')
logger.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
if args.verbose:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
logger.addHandler(LogConsoleHandler)
## Set up file output
now = datetime.datetime.now()
DateString = '{}'.format(now.strftime('%Y%m%d'))
TimeString = '{} HST'.format(now.strftime('%H:%M:%S'))
LogFileName = os.path.join('/', 'var', 'log', 'Kegerator', 'Log_{}.txt'.format(DateString))
LogFileHandler = logging.FileHandler(LogFileName)
LogFileHandler.setLevel(logging.DEBUG)
LogFileHandler.setFormatter(LogFormat)
logger.addHandler(LogFileHandler)
##-------------------------------------------------------------------------
## Get Temperature and Humidity Values
##-------------------------------------------------------------------------
logger.info('#### Reading Temperature and Humidity Sensors ####')
temperatures_F = []
try:
logger.debug('Reading DHT22')
DHT = DHT22.DHT22(pin=18)
DHT.read()
logger.debug(' Temperature = {:.3f} F, Humidity = {:.1f} %'.format(DHT.temperature_F, DHT.humidity))
temperatures_F.append(DHT.temperature_F)
RH = DHT.humidity
AH = humidity.relative_to_absolute_humidity(DHT.temperature_C, DHT.humidity)
logger.debug(' Absolute Humidity = {:.2f} g/m^3'.format(AH))
except:
RH = float('nan')
AH = float('nan')
logger.debug('Reading DS18B20')
sensor = DS18B20.DS18B20()
sensor.read()
for temp in sensor.temperatures_C:
logger.debug(' Temperature = {:.3f} F'.format(temp*9./5.+32.))
temperatures_F.append(temp*9./5.+32.)
##-------------------------------------------------------------------------
## Record Values to Table
##-------------------------------------------------------------------------
datafile = os.path.join('/', 'var', 'log', 'Kegerator', '{}.txt'.format(DateString))
logger.debug("Preparing astropy table object for data file {}".format(datafile))
if not os.path.exists(datafile):
logger.info("Making new astropy table object")
SummaryTable = table.Table(names=('date', 'time', 'AmbTemp', 'KegTemp', 'KegTemp1', 'KegTemp2', 'KegTemp3', 'RH', 'AH', 'status'), \
dtype=('S10', 'S12', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'S8') )
else:
logger.debug("Reading astropy table object from file: {0}".format(datafile))
try:
SummaryTable = ascii.read(datafile, guess=False,
header_start=0, data_start=1,
Reader=ascii.basic.Basic,
converters={
'date': [ascii.convert_numpy('S10')],
'time': [ascii.convert_numpy('S12')],
'AmbTemp': [ascii.convert_numpy('f4')],
'KegTemp': [ascii.convert_numpy('f4')],
'KegTemp1': [ascii.convert_numpy('f4')],
'KegTemp2': [ascii.convert_numpy('f4')],
'KegTemp3': [ascii.convert_numpy('f4')],
                                    'RH': [ascii.convert_numpy('f4')],
'AH': [ascii.convert_numpy('f4')],
'status': [ascii.convert_numpy('S11')],
})
except:
logger.critical("Failed to read data file: {0} {1} {2}".format(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
##-------------------------------------------------------------------------
## Turn Kegerator Relay On or Off Based on Temperature
##-------------------------------------------------------------------------
temperatures_F.sort()
ambient_temperature = temperatures_F.pop()
assert ambient_temperature > max(temperatures_F)
logger.info('Ambient Temperature = {:.1f}'.format(ambient_temperature))
for temp in temperatures_F:
logger.info('Kegerator Temperatures = {:.1f} F'.format(temp))
temperature = np.median(temperatures_F)
logger.info('Median Temperature = {:.1f} F'.format(temperature))
if temperature > temp_high:
status = 'On'
logger.info('Temperature {:.1f} is greater than {:.1f}. Turning freezer {}.'.format(temperature, temp_high, status))
GPIO.output(23, True)
elif temperature < temp_low:
status = 'Off'
logger.info('Temperature {:.1f} is less than {:.1f}. Turning freezer {}.'.format(temperature, temp_low, status))
GPIO.output(23, False)
else:
if len(SummaryTable) > 0:
status = SummaryTable['status'][-1]
else:
status = 'unknown'
        logger.info('Temperature is {:.1f}. Taking no action. Status is {}'.format(temperature, status))
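    # The block above is a simple hysteresis (bang-bang) controller: the relay
    # only changes state outside the [temp_low, temp_high] band, e.g. 42.5 F
    # turns the freezer On, 37.5 F turns it Off, and 40.0 F keeps the previous
    # status, which avoids rapid on/off cycling of the compressor.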
##-------------------------------------------------------------------------
## Add row to data table
##-------------------------------------------------------------------------
logger.debug("Writing new row to data table.")
while len(temperatures_F) < 4:
temperatures_F.append(float('nan'))
SummaryTable.add_row((DateString, TimeString, ambient_temperature, temperature, \
temperatures_F[0], temperatures_F[1], temperatures_F[2], \
RH, AH, status))
## Write Table to File
logger.debug(" Writing new data file.")
ascii.write(SummaryTable, datafile, Writer=ascii.basic.Basic)
##-------------------------------------------------------------------------
## Log to Carriots
##-------------------------------------------------------------------------
logger.info('Sending Data to Carriots')
logger.debug(' Creating Device object')
Device = Carriots.Client(device_id="kegerator@joshwalawender")
logger.debug(' Reading api key')
Device.read_api_key_from_file(file=os.path.join(os.path.expanduser('~joshw'), '.carriots_api'))
data_dict = {'Temperature': temperature, \
'Status': status
}
logger.debug(' Data: {}'.format(data_dict))
Device.upload(data_dict)
logger.info('Done')
##-------------------------------------------------------------------------
## PLOT
##-------------------------------------------------------------------------
def plot(args):
import matplotlib.pyplot as pyplot
##-------------------------------------------------------------------------
## Set date to tonight if not specified
##-------------------------------------------------------------------------
now = datetime.datetime.now()
DateString = now.strftime("%Y%m%d")
if not args.date:
args.date = DateString
##-------------------------------------------------------------------------
## Define File Names
##-------------------------------------------------------------------------
LogFile = os.path.join('/', 'var', 'log', 'Kegerator', 'PlotLog_'+args.date+".txt")
PlotFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date+".png")
DataFile = os.path.join('/', 'var', 'log', 'Kegerator', args.date+".txt")
##-------------------------------------------------------------------------
## Create logger object
##-------------------------------------------------------------------------
logger = logging.getLogger('MyLogger')
logger.setLevel(logging.DEBUG)
## Set up console output
LogConsoleHandler = logging.StreamHandler()
if args.verbose:
LogConsoleHandler.setLevel(logging.DEBUG)
else:
LogConsoleHandler.setLevel(logging.INFO)
LogFormat = logging.Formatter('%(asctime)23s %(levelname)8s: %(message)s')
LogConsoleHandler.setFormatter(LogFormat)
logger.addHandler(LogConsoleHandler)
## Set up file output
LogFileHandler = logging.FileHandler(LogFile)
LogFileHandler.setLevel(logging.DEBUG)
LogFileHandler.setFormatter(LogFormat)
logger.addHandler(LogFileHandler)
logger.info("Kegerator.py invoked with --plot option")
logger.info(" Making plot for day of {}".format(args.date))
##-------------------------------------------------------------------------
## Read Data
##-------------------------------------------------------------------------
if os.path.exists(DataFile):
logger.info(" Found data file: {}".format(DataFile))
data = ascii.read(DataFile, guess=False,
header_start=0, data_start=1,
Reader=ascii.basic.Basic,
converters={
'date': [ascii.convert_numpy('S10')],
'time': [ascii.convert_numpy('S12')],
'AmbTemp': [ascii.convert_numpy('f4')],
'KegTemp': [ascii.convert_numpy('f4')],
'KegTemp1': [ascii.convert_numpy('f4')],
'KegTemp2': [ascii.convert_numpy('f4')],
'KegTemp3': [ascii.convert_numpy('f4')],
'RH': [ascii.convert_numpy('f4')],
'AH': [ascii.convert_numpy('f4')],
'status': [ascii.convert_numpy('S11')],
})
datetime_objects = [datetime.datetime.strptime(x['time'], '%H:%M:%S HST') for x in data]
time_decimal = [(x.hour + x.minute/60. + x.second/3600.) for x in datetime_objects]
DecimalTime = max(time_decimal)
##-------------------------------------------------------------------------
## Make Plot
##-------------------------------------------------------------------------
plot_upper_temp = 45
plot_lower_temp = 29
pyplot.ioff()
plotpos = [
[0.05, 0.59, 0.65, 0.40], [0.73, 0.59, 0.21, 0.40],\
[0.05, 0.52, 0.65, 0.07], [0.73, 0.52, 0.21, 0.07],\
[0.05, 0.25, 0.65, 0.24], [0.73, 0.25, 0.21, 0.24],\
[0.05, 0.05, 0.65, 0.18], [0.73, 0.05, 0.21, 0.18],\
]
if len(data) > 1:
logger.info(" Generating plot {} ... ".format(PlotFile))
dpi = 100
pyplot.figure(figsize=(14,8), dpi=dpi)
## Plot Temperature for This Day
logger.debug(" Rendering Temperature Plot.")
TemperatureAxes = pyplot.axes(plotpos[0], xticklabels=[])
pyplot.title("Kegerator Temperatures for "+args.date)
pyplot.plot(time_decimal, data['KegTemp'], 'ko', label="Median Temp.", markersize=3, markeredgewidth=0)
pyplot.plot(time_decimal, data['KegTemp1'], 'bo', label="Temp. 1", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot(time_decimal, data['KegTemp2'], 'go', label="Temp. 2", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot(time_decimal, data['KegTemp3'], 'yo', label="Temp. 3", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
pyplot.ylabel("Kegerator Temp. (F)")
pyplot.xlim(0, 24)
pyplot.xticks(np.arange(0,24,2))
pyplot.ylim(plot_lower_temp, plot_upper_temp)
pyplot.grid()
pyplot.legend(loc='best', prop={'size': 10})
TemperatureAxes.axhline(32, color='red', lw=4)
TemperatureAxes.axhline(temp_low, color='blue', lw=4)
TemperatureAxes.axhline(temp_high, color='blue', lw=4)
## Plot Temperature for Last Hour
logger.debug(" Rendering Recent Temperature Plot.")
RecentTemperatureAxes = pyplot.axes(plotpos[1], xticklabels=[], yticklabels=[])
pyplot.title("Last Hour")
pyplot.plot(time_decimal, data['KegTemp'], 'ko', label="Kegerator Temp", markersize=3, markeredgewidth=0)
pyplot.plot(time_decimal, data['KegTemp1'], 'bo', label="Kegerator Temp 1", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot(time_decimal, data['KegTemp2'], 'go', label="Kegerator Temp 2", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot(time_decimal, data['KegTemp3'], 'yo', label="Kegerator Temp 3", markersize=2, markeredgewidth=0, alpha=0.6)
pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
pyplot.xticks(np.arange(0,24,0.25))
if DecimalTime > 1.0:
pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
else:
pyplot.xlim(0,1.1)
pyplot.ylim(plot_lower_temp, plot_upper_temp)
pyplot.grid()
RecentTemperatureAxes.axhline(32, color='red', lw=4)
RecentTemperatureAxes.axhline(temp_low, color='blue', lw=4)
RecentTemperatureAxes.axhline(temp_high, color='blue', lw=4)
## Plot Relay State
translator = {'On': 1, 'Off': 0, 'unknown': -0.25}
relay_state = [translator[val] for val in data['status']]
logger.debug(" Rendering Relay Status Plot.")
RelayAxes = pyplot.axes(plotpos[2], yticklabels=[])
pyplot.plot(time_decimal, relay_state, 'ko-', markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [-1,2], 'g-', alpha=0.4)
pyplot.ylabel("Relay")
pyplot.xlim(0, 24)
pyplot.yticks([0,1])
pyplot.ylim(-0.5,1.5)
pyplot.xticks(np.arange(0,24,2))
pyplot.grid()
## Plot Relay State for Last Hour
logger.debug(" Rendering Recent Relay State Plot.")
RecentRelayAxes = pyplot.axes(plotpos[3], yticklabels=[])
pyplot.plot(time_decimal, relay_state, 'ko-', markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [-1,2], 'g-', alpha=0.4)
pyplot.xticks(np.arange(0,24,0.25))
if DecimalTime > 1.0:
pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
else:
pyplot.xlim(0,1.1)
pyplot.yticks([0,1])
pyplot.ylim(-0.5,1.5)
pyplot.grid()
## Plot Humidity for This Day
HumidityAxes = pyplot.axes(plotpos[4], xticklabels=[])
logger.debug(" Rendering Humidity Plot.")
pyplot.plot(time_decimal, data['RH'], 'bo', label="Humidity", markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [0,100], 'g-', alpha=0.4)
pyplot.ylabel("Humidity (%)")
pyplot.xlabel("Time (Hours HST)")
pyplot.xlim(0, 24)
pyplot.ylim(30,100)
pyplot.xticks(np.arange(0,24,2))
pyplot.grid()
## Plot Humidity for Last 2 Hours
logger.debug(" Rendering Recent Humidity Plot.")
RecentHumidityAxes = pyplot.axes(plotpos[5], yticklabels=[], xticklabels=[])
pyplot.plot(time_decimal, data['RH'], 'bo', label="Humidity", markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [0,100], 'g-', alpha=0.4)
pyplot.xticks(np.arange(0,24,0.25))
if DecimalTime > 1.0:
pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
else:
pyplot.xlim(0,1.1)
pyplot.ylim(30,100)
pyplot.grid()
## Plot Case Temperature for This Day
logger.debug(" Rendering Case Temperature Plot.")
AmbTemperatureAxes = pyplot.axes(plotpos[6])
pyplot.plot(time_decimal, data['AmbTemp'], 'ro', label="Ambient Temp", markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
pyplot.ylabel("Case Temp. (F)")
pyplot.xlim(0, 24)
pyplot.xticks(np.arange(0,24,2))
pyplot.yticks(np.arange(60,100,5))
pyplot.ylim(math.floor(min(data['AmbTemp'])-6), math.ceil(max(data['AmbTemp'])+6))
pyplot.grid()
## Plot Case Temperature for Last Hour
logger.debug(" Rendering Recent Case Temperature Plot.")
RecentAmbTemperatureAxes = pyplot.axes(plotpos[7], yticklabels=[])
pyplot.plot(time_decimal, data['AmbTemp'], 'ro', label="Ambient Temp", markersize=3, markeredgewidth=0)
pyplot.plot([DecimalTime, DecimalTime], [-100,100], 'g-', alpha=0.4)
pyplot.xticks(np.arange(0,24,0.25))
pyplot.yticks(np.arange(60,100,5))
pyplot.ylim(math.floor(min(data['AmbTemp'])-6), math.ceil(max(data['AmbTemp'])+6))
if DecimalTime > 1.0:
pyplot.xlim(DecimalTime-1.0, DecimalTime+0.1)
else:
pyplot.xlim(0,1.1)
pyplot.grid()
logger.debug(" Saving plot to file: {}".format(PlotFile))
pyplot.savefig(PlotFile, dpi=dpi, bbox_inches='tight', pad_inches=0.05)
logger.info(" done.")
else:
logger.info("Could not find data file: {}".format(DataFile))
##-------------------------------------------------------------------------
## Create Daily Symlink if Not Already
##-------------------------------------------------------------------------
LinkFileName = 'latest.png'
LinkFile = os.path.join('/', 'var', 'log', 'Kegerator', LinkFileName)
if not os.path.exists(LinkFile):
logger.info('Making {} symlink to {}'.format(LinkFile, PlotFile))
os.symlink(PlotFile, LinkFile)
logger.info("Done")
if __name__ == '__main__':
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description="Program description.")
parser.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
default=False, help="Be verbose! (default = False)")
parser.add_argument("-p", "--plot",
action="store_true", dest="plot",
default=False, help="Make plot.")
parser.add_argument("-d", "--date",
dest="date", required=False, default="", type=str,
help="Date to analyze. (i.e. '20130805')")
args = parser.parse_args()
if not args.plot:
main(args)
else:
plot(args)
| bsd-2-clause |
lanselin/pysal | pysal/esda/tabular.py | 6 | 12714 | #from ...common import requires as _requires
import itertools as _it
from pysal.weights import W
# I would like to define it like this, so that you could make a call like:
# Geary(df, 'HOVAL', 'INC', w=W), but this only works in Python3. So, I have to
# use a workaround
#def _statistic(df, *cols, stat=None, w=None, inplace=True,
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
pvalue = 'sim', outvals = None, swapname='', **kwargs):
"""
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
### Preprocess
if not inplace:
new_df = df.copy()
_univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
inplace=True, outvals=outvals,
swapname=swapname, **kwargs)
return new_df
if w is None:
for name in df._metadata:
this_obj = df.__dict__.get(name)
if isinstance(this_obj, W):
w = this_obj
if w is None:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
### Prep indexes
if outvals is None:
outvals = []
outvals.insert(0,'_statistic')
if pvalue.lower() in ['all', 'both', '*']:
        raise NotImplementedError("If you want more than one type of PValue, add"
" the targeted pvalue type to outvals. For example:"
" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
"'p_rand']")
# this is nontrivial, since we
# can't know which p_value types are on the object without computing it.
# This is because we don't flag them with @properties, so they're just
# arbitrarily assigned post-facto. One solution might be to post-process the
# objects, determine which pvalue types are available, and then grab them
# all if needed.
    if pvalue != '':
outvals.append('p_'+pvalue.lower())
if isinstance(cols, str):
cols = [cols]
### Make closure around weights & apply columnwise
def column_stat(column):
return stat(column.values, w=w, **kwargs)
stat_objs = df[cols].apply(column_stat)
### Assign into dataframe
for col in cols:
stat_obj = stat_objs[col]
y = kwargs.get('y')
if y is not None:
col += '-' + y.name
outcols = ['_'.join((col, val)) for val in outvals]
for colname, attname in zip(outcols, outvals):
df[colname] = stat_obj.__getattribute__(attname)
    if swapname != '':
df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic') else col
for col in df.columns]
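# Sketch of how a public wrapper is expected to call this handler (illustrative
# only; `Moran` stands in for any esda statistic with a compatible signature):
#
#     def moran(df, cols, w=None, inplace=True, pvalue='sim', outvals=None, **stat_kws):
#         return _univariate_handler(df, cols, stat=Moran, w=w, inplace=inplace,
#                                    pvalue=pvalue, outvals=outvals,
#                                    swapname='moran', **stat_kws)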
def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
outvals=None, **kwargs):
"""
Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
real_swapname = kwargs.pop('swapname', '')
if isinstance(y, str):
y = [y]
if isinstance(x, str):
x = [x]
if not inplace:
new_df = df.copy()
_bivariate_handler(new_df, x, y=y, w=w, inplace=True,
swapname=real_swapname,
pvalue=pvalue, outvals=outvals, **kwargs)
return new_df
if y is None:
y = x
for xi,yi in _it.product(x,y):
if xi == yi:
continue
_univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
    if real_swapname != '':
df.columns = [_swap_ending(col, real_swapname)
if col.endswith('_statistic')
else col for col in df.columns]
def _swap_ending(s, ending, delim='_'):
"""
Replace the ending of a string, delimited into an arbitrary
number of chunks by `delim`, with the ending provided
Parameters
----------
s : string
string to replace endings
ending : string
string used to replace ending of `s`
delim : string
string that splits s into one or more parts
Returns
-------
new string where the final chunk of `s`, delimited by `delim`, is replaced
with `ending`.
"""
parts = [x for x in s.split(delim)[:-1] if x != '']
parts.append(ending)
return delim.join(parts)
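# Doctest-style illustration (not in the original module):
#
#     >>> _swap_ending('HOVAL_statistic', 'moran')
#     'HOVAL_moran'
#     >>> _swap_ending('HOVAL_local_statistic', 'moran_local')
#     'HOVAL_local_moran_local'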
##############
# DOCSTRINGS #
##############
_univ_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
        return a series containing the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
        {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_bv_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
        the bivariate statistic. If no Y is provided, pairwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
        return a series containing the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
        {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_rate_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
        return a series containing the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
        {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
| bsd-3-clause |
AIML/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
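# Summary of the deliberately broken estimators defined above:
#   BaseBadClassifier               - fit() performs no input validation at all
#   NoCheckinPredict                - fit() validates, but predict() does not
#   NoSparseClassifier              - fails on sparse input with an unhelpful
#                                     "Nonsensical Error" instead of degrading
#                                     gracefully
#   CorrectNotFittedErrorClassifier - raises a ValueError subclass when
#                                     predict() is called before fit()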
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
HappyFaceGoettingen/HappyFaceCore | modules/CMSPhedexDataExtract.py | 1 | 15003 | # -*- coding: utf-8 -*-
import hf, lxml, logging, datetime
from sqlalchemy import *
import json
from string import strip
import time
class CMSPhedexDataExtract(hf.module.ModuleBase):
config_keys = {
'link_direction': ("transfers 'from' or 'to' you", 'to'),
'time_range': ('set timerange in hours', '24'),
'base_url': ('use --no-check-certificate, end base url with starttime=', ''),
'report_base':("insert base url for reports don't forget fromfilter or tofilter with your name! finish your link with starttime=, the starttime is inserted by the module ",'https://cmsweb.cern.ch/phedex/prod/Activity::ErrorInfo?tofilter=T1_DE_KIT&starttime='),
'blacklist': ('ignore links from or to those sites, csv', ''),
'your_name': ('Name of your site', 'T1_DE_KIT_Buffer'),
'category': ('use prod or debug, its used to build links to the cern info sites','prod'),
'button_pic_path_in': ('path to your in-button picture', '/HappyFace/gridka/static/themes/armin_box_arrows/trans_in.png'),
'button_pic_path_out': ('path to your out-button picture', '/HappyFace/gridka/static/themes/armin_box_arrows/trans_out.png'),
        'qualitiy_broken_value': ('a timebin with a quality equal to or less than this will be considered broken', '0.4'),
't0_critical_failures': ('failure threshold for status critical', '10'),
't0_warning_failures': ('failure threshold for status warning', '10'),
't1_critical_failures': ('failure threshold for status critical', '15'),
't1_warning_failures': ('failure threshold for status warning', '10'),
't2_critical_failures': ('failure threshold for status critical', '15'),
't2_warning_failures': ('failure threshold for status warning', '10'),
't3_critical_failures': ('failure threshold for status critical', '15'),
't3_warning_failures': ('failure threshold for status warning', '10'),
't0_critical_quality': ('quality threshold for status critical', '0.5'),
't0_warning_quality': ('quality threshold for status warning', '0.6'),
't1_critical_quality': ('quality threshold for status critical', '0.5'),
't1_warning_quality': ('quality threshold for status warning', '0.6'),
't2_critical_quality': ('quality threshold for status critical', '0.3'),
't2_warning_quality': ('quality threshold for status warning', '0.5'),
't3_critical_quality': ('quality threshold for status critical', '0.3'),
't3_warning_quality': ('quality threshold for status warning', '0.5'),
't1_critical_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.5'),
't1_warning_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.3'),
't2_critical_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.4'),
't2_warning_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.3'),
't3_warning_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.3'),
't3_critical_ratio': ('ratio of (2*broken_links+warning_links)/all_links threshold', '0.5'),
'eval_time': ('links within the last eval_time hours will be considered valuable status evaluation', '3'),
't0_eval_amount': ('minimum amount of links to eval status for this link group', 0),
't1_eval_amount': ('minimum amount of links to eval status for this link group', 0),
't2_eval_amount': ('minimum amount of links to eval status for this link group', 5),
't3_eval_amount': ('minimum amount of links to eval status for this link group', 5)
}
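    # Each config_keys entry maps an option name to a (description, default)
    # tuple; prepareAcquisition() below reads them via self.config, e.g.
    # int(self.config['time_range']) picks up the '24' hour default unless the
    # HappyFace configuration overrides it.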
config_hint = 'If you have problems downloading your source file, use: "source_url = both|--no-check-certificate|url"'
table_columns = [
Column('direction', TEXT),
Column('request_timestamp', INT),
Column('time_range', INT),
], []
subtable_columns = {
'details': ([
Column('done_files', INT),
Column('fail_files', INT),
Column('timebin', INT),
Column('rate', INT),
Column('name', TEXT),
Column('color', TEXT),
Column('quality', FLOAT)
], [])
}
def prepareAcquisition(self):
self.url = self.config['base_url']
self.link_direction = self.config['link_direction']
self.your_name = self.config['your_name']
self.eval_time = int(self.config['eval_time'])
if self.link_direction == 'from':
self.parse_direction = 'to'
else:
self.parse_direction = 'from'
self.time_range = int(self.config['time_range'])
try:
self.blacklist = self.config['blacklist']
except AttributeError:
self.blacklist = []
self.time = int(time.time())/3600*3600
self.url += str(self.time-self.time_range*3600)
self.source = hf.downloadService.addDownload(self.url)
self.details_db_value_list = []
self.category = self.config['category']
self.button_pic_in = self.config['button_pic_path_in']
self.button_pic_out = self.config['button_pic_path_out']
self.qualitiy_broken_value = float(self.config['qualitiy_broken_value'])
self.critical_failures = {}
self.warning_failures = {}
self.critical_quality = {}
self.warning_quality = {}
self.critical_ratio = {}
self.warning_ratio = {}
self.eval_amount = {}
for tier in ['t0', 't1', 't2', 't3']:
self.critical_failures[tier] = int(self.config[tier + '_critical_failures'])
self.warning_failures[tier] = int(self.config[tier + '_warning_failures'])
self.critical_quality[tier] = float(self.config[tier + '_critical_quality'])
self.warning_quality[tier] = float(self.config[tier + '_warning_quality'])
self.eval_amount[tier] = int(self.config[tier + '_eval_amount'])
if tier != 't0':
self.critical_ratio[tier] = float(self.config[tier + '_critical_ratio'])
self.warning_ratio[tier] = float(self.config[tier + '_warning_ratio'])
def extractData(self):
        # For portability reasons this colormap is hardcoded. To regenerate it: color_map = map(lambda i: matplotlib.colors.rgb2hex(matplotlib.pyplot.get_cmap('RdYlGn')(float(i)/100)), range(101))
color_map = ['#a50026', '#a90426', '#af0926', '#b30d26', '#b91326', '#bd1726', '#c21c27', '#c62027', '#cc2627', '#d22b27', '#d62f27', '#da362a', '#dc3b2c', '#e0422f', '#e24731', '#e54e35', '#e75337', '#eb5a3a', '#ee613e', '#f16640', '#f46d43', '#f57245', '#f67a49', '#f67f4b', '#f8864f', '#f98e52', '#f99355', '#fa9b58', '#fba05b', '#fca85e', '#fdad60', '#fdb365', '#fdb768', '#fdbd6d', '#fdc372', '#fdc776', '#fecc7b', '#fed07e', '#fed683', '#feda86', '#fee08b', '#fee28f', '#fee695', '#feea9b', '#feec9f', '#fff0a6', '#fff2aa', '#fff6b0', '#fff8b4', '#fffcba', '#feffbe', '#fbfdba', '#f7fcb4',\
'#f4fab0', '#eff8aa', '#ecf7a6', '#e8f59f', '#e5f49b', '#e0f295', '#dcf08f', '#d9ef8b', '#d3ec87', '#cfeb85', '#c9e881', '#c5e67e', '#bfe47a', '#bbe278', '#b5df74', '#afdd70', '#abdb6d', '#a5d86a', '#a0d669', '#98d368', '#93d168', '#8ccd67', '#84ca66', '#7fc866', '#78c565', '#73c264', '#6bbf64', '#66bd63', '#5db961', '#57b65f', '#4eb15d', '#45ad5b', '#3faa59', '#36a657', '#30a356', '#279f53', '#219c52', '#199750', '#17934e', '#148e4b', '#118848', '#0f8446', '#0c7f43', '#0a7b41', '#07753e', '#05713c', '#026c39', '#006837']
data = {'direction' : self.link_direction, 'source_url' : self.source.getSourceUrl(), 'time_range' : self.time_range, 'request_timestamp' : self.time}
x_line = self.time - self.eval_time * 3600 #data with a timestamp greater than this one will be used for status evaluation
#store the last N qualities of the Tx links within those dictionaries, {TX_xxx : (q1,q2,q3...)}
link_list = {} # link_list['t1']['t1_de_kit'] == [{time1}, {time2}, ]
fobj = json.load(open(self.source.getTmpPath(), 'r'))['phedex']['link']
for links in fobj:
if links[self.link_direction] == self.your_name and links[self.parse_direction] not in self.blacklist:
link_name = links[self.parse_direction]
tier = 't' + link_name[1]
for transfer in links['transfer']:
help_append = {}
help_append['timebin'] = int(transfer['timebin'])
help_append['done_files'] = done = int(transfer['done_files'])
help_append['fail_files'] = fail = int(transfer['fail_files'])
help_append['rate'] = int(transfer['rate'])
help_append['name'] = link_name
#quality = done_files/(done_files + fail_files), if else to catch ZeroDivisionError
if done != 0:
help_append['quality'] = float(done)/float(done + fail)
help_append['color'] = color_map[int(help_append['quality']*100)]
self.details_db_value_list.append(help_append)
if help_append['timebin'] >= x_line:
link_list.setdefault(tier, {}).setdefault(link_name, []).append(help_append)
elif fail != 0:
help_append['quality'] = 0.0
help_append['color'] = color_map[int(help_append['quality']*100)]
self.details_db_value_list.append(help_append)
if help_append['timebin'] >= x_line:
link_list.setdefault(tier, {}).setdefault(link_name, []).append(help_append)
# code for status evaluation TODO: find a way to evaluate trend, change of quality between two bins etc.
data['status'] = 1.0
for tier,links in link_list.iteritems():
good_link = 0
bad_link = 0
warn_link = 0
for link_name, time_bins in links.iteritems():
try:
done_files = 0
fail_files = 0
for single_bin in time_bins:
done_files += int(single_bin['done_files'])
fail_files += int(single_bin['fail_files'])
if fail_files != 0 and (float(done_files) / (done_files + fail_files) <= self.critical_quality[tier] or fail_files >= self.critical_failures[tier]):
bad_link += 1
elif fail_files != 0 and (float(done_files) / (done_files + fail_files) <= self.warning_quality[tier] or fail_files >= self.warning_failures[tier]):
warn_link += 1
elif done_files != 0:
good_link += 1
except IndexError:
pass
if tier == 't0' and bad_link > 0: #here you could use a config parameter
data['status'] = 0.0
break
elif tier != 't0':
if ((2.0 * bad_link + warn_link) / (2.0 * bad_link + warn_link + good_link) >= self.critical_ratio[tier]) and self.eval_amount[tier] <= (bad_link + warn_link + good_link):
data['status'] = 0.0
break
elif ((2.0 * bad_link + warn_link) / (2.0 * bad_link + warn_link + good_link) >= self.warning_ratio[tier]) and self.eval_amount[tier] <= (bad_link + warn_link + good_link):
data['status'] = 0.5
return data
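    # Worked example of the ratio rule above (illustrative numbers): a 't2'
    # group with 2 broken, 1 warning and 7 good links gives
    # (2*2 + 1) / (2*2 + 1 + 7) = 5/12 ~ 0.42, which is >= the default
    # t2_critical_ratio of 0.4, so with at least t2_eval_amount (5) links
    # evaluated the module status drops to 0.0.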
def fillSubtables(self, parent_id):
self.subtables['details'].insert().execute([dict(parent_id=parent_id, **row) for row in self.details_db_value_list])
def getTemplateData(self):
report_base = strip(self.config['report_base']) + '&'
your_direction = strip(self.config['link_direction'])
if your_direction == 'from':
their_direction = 'tofilter='
your_direction = 'fromfilter='
else:
their_direction = 'fromfilter='
your_direction = 'tofilter='
data = hf.module.ModuleBase.getTemplateData(self)
details_list = self.subtables['details'].select().where(self.subtables['details'].c.parent_id==self.dataset['id']).order_by(self.subtables['details'].c.name.asc()).execute().fetchall()
        raw_data_list = []  # contains dicts {x, y, w, fails, done, rate, time, color, link, marking} where w (the quality) determines the color
x0 = self.dataset['request_timestamp'] / 3600 * 3600 - self.dataset['time_range'] * 3600 #normalize the timestamps to the requested timerange
y_value_map = {} # maps the name of a link to a y-value
for values in details_list:
if values['name'] not in y_value_map: #add a new entry if the link name is not in the value_map
y_value_map[values['name']] = len(y_value_map)
t_number = values['name'].split('_')[0].lower()
marking_color = values['color']
if int(self.config['%s_critical_failures'%t_number]) <= int(values['fail_files']):
marking_color = '#ff0000'
elif int(self.config['%s_warning_failures'%t_number]) <= int(values['fail_files']):
marking_color = '#af00af'
help_dict = {'x':int(values['timebin']-x0)/3600, 'y':int(y_value_map[values['name']]), 'w':str('%.2f' %values['quality']), 'fails':int(values['fail_files']), 'done':int(values['done_files']), 'rate':str('%.3f' %(float(values['rate'])/1024/1024)), 'time':datetime.datetime.fromtimestamp(values['timebin']), 'color':values['color'], 'link':report_base + their_direction + values['name'], 'marking':marking_color}
raw_data_list.append(help_dict)
name_mapper = []
for i in range(len(y_value_map)):
name_mapper.append('-')
for key in y_value_map.iterkeys():
name_mapper[y_value_map[key]] = key
for i,name in enumerate(name_mapper):
name_mapper[i] = {'name':name, 'link':report_base + their_direction + name}
data['link_list'] = raw_data_list
data['titles'] = name_mapper
data['height'] = len(y_value_map) * 15 + 100
data['width'] = int(660/(self.dataset['time_range']+1))
data['button_pic_in'] = self.config['button_pic_path_in']
data['button_pic_out'] = self.config['button_pic_path_out']
data['info_link_1'] = 'https://cmsweb.cern.ch/phedex/' + self.config['category'] + '/Activity::QualityPlots?graph=quality_all&entity=dest&src_filter='
data['info_link_2'] = 'https://cmsweb.cern.ch/phedex/' + self.config['category'] + '/Activity::QualityPlots?graph=quality_all&entity=src&dest_filter='
        return data
| apache-2.0 |
xhochy/arrow | python/pyarrow/tests/test_dataset.py | 1 | 86386 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import pathlib
import pickle
import textwrap
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.csv
import pyarrow.fs as fs
from pyarrow.tests.util import change_cwd
try:
import pandas as pd
except ImportError:
pd = None
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
def _generate_data(n):
import datetime
import itertools
day = datetime.datetime(2000, 1, 1)
interval = datetime.timedelta(days=5)
colors = itertools.cycle(['green', 'blue', 'yellow', 'red', 'orange'])
data = []
for i in range(n):
data.append((day, i, float(i), next(colors)))
day += interval
return pd.DataFrame(data, columns=['date', 'index', 'value', 'color'])
def _table_from_pandas(df):
schema = pa.schema([
pa.field('date', pa.date32()),
pa.field('index', pa.int64()),
pa.field('value', pa.float64()),
pa.field('color', pa.string()),
])
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
return table.replace_schema_metadata()
def _filesystem_uri(path):
# URIs on Windows must follow 'file:///C:...' or 'file:/C:...' patterns.
if os.name == 'nt':
uri = 'file:///{}'.format(path)
else:
uri = 'file://{}'.format(path)
return uri
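# For example (derived from the branches above):
#   _filesystem_uri('/tmp/data') -> 'file:///tmp/data' on POSIX systems
#   _filesystem_uri('C:/data')   -> 'file:///C:/data' on Windows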
@pytest.fixture
@pytest.mark.parquet
def mockfs():
import pyarrow.parquet as pq
mockfs = fs._MockFileSystem()
directories = [
'subdir/1/xxx',
'subdir/2/yyy',
]
for i, directory in enumerate(directories):
path = '{}/file{}.parquet'.format(directory, i)
mockfs.create_dir(directory)
with mockfs.open_output_stream(path) as out:
data = [
list(range(5)),
list(map(float, range(5))),
list(map(str, range(5))),
[i] * 5
]
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64()),
pa.field('str', pa.string()),
pa.field('const', pa.int64()),
])
batch = pa.record_batch(data, schema=schema)
table = pa.Table.from_batches([batch])
pq.write_table(table, out)
return mockfs
@pytest.fixture
def open_logging_fs(monkeypatch):
from pyarrow.fs import PyFileSystem, LocalFileSystem
from .test_fs import ProxyHandler
localfs = LocalFileSystem()
def normalized(paths):
return {localfs.normalize_path(str(p)) for p in paths}
opened = set()
def open_input_file(self, path):
path = localfs.normalize_path(str(path))
opened.add(path)
return self._fs.open_input_file(path)
# patch proxyhandler to log calls to open_input_file
monkeypatch.setattr(ProxyHandler, "open_input_file", open_input_file)
fs = PyFileSystem(ProxyHandler(localfs))
@contextlib.contextmanager
def assert_opens(expected_opened):
opened.clear()
try:
yield
finally:
assert normalized(opened) == normalized(expected_opened)
return fs, assert_opens
@pytest.fixture(scope='module')
def multisourcefs(request):
request.config.pyarrow.requires('pandas')
request.config.pyarrow.requires('parquet')
import pyarrow.parquet as pq
df = _generate_data(1000)
mockfs = fs._MockFileSystem()
    # simply split the dataframe into four chunks to construct a data source
    # from each chunk into its own directory
df_a, df_b, df_c, df_d = np.array_split(df, 4)
# create a directory containing a flat sequence of parquet files without
# any partitioning involved
mockfs.create_dir('plain')
for i, chunk in enumerate(np.array_split(df_a, 10)):
path = 'plain/chunk-{}.parquet'.format(i)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with schema partitioning by weekday and color
mockfs.create_dir('schema')
for part, chunk in df_b.groupby([df_b.date.dt.dayofweek, df_b.color]):
folder = 'schema/{}/{}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by year and month
mockfs.create_dir('hive')
for part, chunk in df_c.groupby([df_c.date.dt.year, df_c.date.dt.month]):
folder = 'hive/year={}/month={}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by color
mockfs.create_dir('hive_color')
for part, chunk in df_d.groupby(["color"]):
folder = 'hive_color/color={}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
return mockfs
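# Resulting mock filesystem layout (sketch of the directories created above):
#   plain/chunk-<i>.parquet
#   schema/<dayofweek>/<color>/chunk.parquet
#   hive/year=<year>/month=<month>/chunk.parquet
#   hive_color/color=<color>/chunk.parquet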
@pytest.fixture
def dataset(mockfs):
format = ds.ParquetFileFormat()
selector = fs.FileSelector('subdir', recursive=True)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
return factory.finish()
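# Note: with the DirectoryPartitioning above, the mockfs paths
# 'subdir/1/xxx/file0.parquet' and 'subdir/2/yyy/file1.parquet' are discovered
# as the partitions (group=1, key='xxx') and (group=2, key='yyy').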
def test_filesystem_dataset(mockfs):
schema = pa.schema([
pa.field('const', pa.int64())
])
file_format = ds.ParquetFileFormat()
paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
partitions = [ds.field('part') == x for x in range(1, 3)]
fragments = [file_format.make_fragment(path, mockfs, part)
for path, part in zip(paths, partitions)]
root_partition = ds.field('level') == ds.scalar(1337)
dataset_from_fragments = ds.FileSystemDataset(
fragments, schema=schema, format=file_format,
filesystem=mockfs, root_partition=root_partition,
)
dataset_from_paths = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs,
partitions=partitions, root_partition=root_partition,
)
for dataset in [dataset_from_fragments, dataset_from_paths]:
assert isinstance(dataset, ds.FileSystemDataset)
assert isinstance(dataset.format, ds.ParquetFileFormat)
assert dataset.partition_expression.equals(root_partition)
assert set(dataset.files) == set(paths)
fragments = list(dataset.get_fragments())
for fragment, partition, path in zip(fragments, partitions, paths):
assert fragment.partition_expression.equals(partition)
assert fragment.path == path
assert isinstance(fragment.format, ds.ParquetFileFormat)
assert isinstance(fragment, ds.ParquetFileFragment)
assert fragment.row_groups == [0]
assert fragment.num_row_groups == 1
row_group_fragments = list(fragment.split_by_row_group())
assert fragment.num_row_groups == len(row_group_fragments) == 1
assert isinstance(row_group_fragments[0], ds.ParquetFileFragment)
assert row_group_fragments[0].path == path
assert row_group_fragments[0].row_groups == [0]
assert row_group_fragments[0].num_row_groups == 1
fragments = list(dataset.get_fragments(filter=ds.field("const") == 0))
assert len(fragments) == 2
# the root_partition keyword has a default
dataset = ds.FileSystemDataset(
fragments, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
# from_paths partitions have defaults
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
for fragment in dataset.get_fragments():
assert fragment.partition_expression.equals(ds.scalar(True))
# validation of required arguments
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, file_format, schema)
# validation of root_partition
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, schema=schema,
format=file_format, root_partition=1)
# missing required argument in from_paths
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset.from_paths(fragments, format=file_format)
def test_filesystem_dataset_no_filesystem_interaction():
# ARROW-8283
schema = pa.schema([
pa.field('f1', pa.int64())
])
file_format = ds.IpcFileFormat()
paths = ['nonexistingfile.arrow']
# creating the dataset itself doesn't raise
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format,
filesystem=fs.LocalFileSystem(),
)
# getting fragments also doesn't raise
dataset.get_fragments()
# scanning does raise
with pytest.raises(FileNotFoundError):
dataset.to_table()
def test_dataset(dataset):
assert isinstance(dataset, ds.Dataset)
assert isinstance(dataset.schema, pa.Schema)
# TODO(kszucs): test non-boolean Exprs for filter do raise
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for task in dataset.scan():
assert isinstance(task, ds.ScanTask)
for batch in task.execute():
assert batch.column(0).equals(expected_i64)
assert batch.column(1).equals(expected_f64)
batches = dataset.to_batches()
assert all(isinstance(batch, pa.RecordBatch) for batch in batches)
table = dataset.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
condition = ds.field('i64') == 1
result = dataset.to_table(use_threads=True, filter=condition).to_pydict()
# don't rely on the scanning order
assert result['i64'] == [1, 1]
assert result['f64'] == [1., 1.]
assert sorted(result['group']) == [1, 2]
assert sorted(result['key']) == ['xxx', 'yyy']
def test_scanner(dataset):
scanner = ds.Scanner.from_dataset(dataset,
memory_pool=pa.default_memory_pool())
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
with pytest.raises(pa.ArrowInvalid):
ds.Scanner.from_dataset(dataset, columns=['unknown'])
scanner = ds.Scanner.from_dataset(dataset, columns=['i64'],
memory_pool=pa.default_memory_pool())
assert isinstance(scanner, ds.Scanner)
assert len(list(scanner.scan())) == 2
for task in scanner.scan():
for batch in task.execute():
assert batch.num_columns == 1
def test_abstract_classes():
classes = [
ds.FileFormat,
ds.Scanner,
ds.Partitioning,
]
for klass in classes:
with pytest.raises(TypeError):
klass()
def test_partitioning():
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
for klass in [ds.DirectoryPartitioning, ds.HivePartitioning]:
partitioning = klass(schema)
assert isinstance(partitioning, ds.Partitioning)
partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.float64())
])
)
expr = partitioning.parse('/3/3.14')
assert isinstance(expr, ds.Expression)
expected = (ds.field('group') == 3) & (ds.field('key') == 3.14)
assert expr.equals(expected)
with pytest.raises(pa.ArrowInvalid):
partitioning.parse('/prefix/3/aaa')
partitioning = ds.HivePartitioning(
pa.schema([
pa.field('alpha', pa.int64()),
pa.field('beta', pa.int64())
])
)
expr = partitioning.parse('/alpha=0/beta=3')
expected = (
(ds.field('alpha') == ds.scalar(0)) &
(ds.field('beta') == ds.scalar(3))
)
assert expr.equals(expected)
for shouldfail in ['/alpha=one/beta=2', '/alpha=one', '/beta=two']:
with pytest.raises(pa.ArrowInvalid):
partitioning.parse(shouldfail)
def test_expression_serialization():
a = ds.scalar(1)
b = ds.scalar(1.1)
c = ds.scalar(True)
d = ds.scalar("string")
e = ds.scalar(None)
f = ds.scalar({'a': 1})
g = ds.scalar(pa.scalar(1))
condition = ds.field('i64') > 5
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
assert condition.validate(schema) == pa.bool_()
assert condition.assume(ds.field('i64') == 5).equals(
ds.scalar(False))
assert condition.assume(ds.field('i64') == 7).equals(
ds.scalar(True))
all_exprs = [a, b, c, d, e, f, g, a == b, a > b, a & b, a | b, ~c,
d.is_valid(), a.cast(pa.int32(), safe=False),
a.cast(pa.int32(), safe=False), a.isin([1, 2, 3]),
ds.field('i64') > 5, ds.field('i64') == 5,
ds.field('i64') == 7]
for expr in all_exprs:
assert isinstance(expr, ds.Expression)
restored = pickle.loads(pickle.dumps(expr))
assert expr.equals(restored)
def test_expression_construction():
zero = ds.scalar(0)
one = ds.scalar(1)
true = ds.scalar(True)
false = ds.scalar(False)
string = ds.scalar("string")
field = ds.field("field")
zero | one == string
~true == false
for typ in ("bool", pa.bool_()):
field.cast(typ) == true
field.isin([1, 2])
with pytest.raises(TypeError):
field.isin(1)
with pytest.raises(pa.ArrowInvalid):
field != {1}
def test_partition_keys():
a, b, c = [ds.field(f) == f for f in 'abc']
assert ds._get_partition_keys(a) == {'a': 'a'}
assert ds._get_partition_keys(a & b & c) == {f: f for f in 'abc'}
nope = ds.field('d') >= 3
assert ds._get_partition_keys(nope) == {}
assert ds._get_partition_keys(a & nope) == {'a': 'a'}
def test_parquet_read_options():
opts1 = ds.ParquetReadOptions()
opts2 = ds.ParquetReadOptions(buffer_size=4096,
dictionary_columns=['a', 'b'])
opts3 = ds.ParquetReadOptions(buffer_size=2**13, use_buffered_stream=True,
dictionary_columns={'a', 'b'})
assert opts1.use_buffered_stream is False
assert opts1.buffer_size == 2**13
assert opts1.dictionary_columns == set()
assert opts2.use_buffered_stream is False
assert opts2.buffer_size == 2**12
assert opts2.dictionary_columns == {'a', 'b'}
assert opts3.use_buffered_stream is True
assert opts3.buffer_size == 2**13
assert opts3.dictionary_columns == {'a', 'b'}
assert opts1 == opts1
assert opts1 != opts2
assert opts2 != opts3
def test_file_format_pickling():
formats = [
ds.IpcFileFormat(),
ds.CsvFileFormat(),
ds.CsvFileFormat(pa.csv.ParseOptions(delimiter='\t',
ignore_empty_lines=True)),
ds.ParquetFileFormat(),
ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(use_buffered_stream=True)
),
ds.ParquetFileFormat(
read_options={
'use_buffered_stream': True,
'buffer_size': 4096,
}
)
]
for file_format in formats:
assert pickle.loads(pickle.dumps(file_format)) == file_format
@pytest.mark.parametrize('paths_or_selector', [
fs.FileSelector('subdir', recursive=True),
[
'subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet',
]
])
def test_filesystem_factory(mockfs, paths_or_selector):
format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(dictionary_columns={"str"})
)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
assert options.partition_base_dir == 'subdir'
assert options.selector_ignore_prefixes == ['.', '_']
assert options.exclude_invalid_files is False
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
assert factory.inspect().equals(pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64()),
pa.field('str', pa.dictionary(pa.int32(), pa.string())),
pa.field('const', pa.int64()),
pa.field('group', pa.int32()),
pa.field('key', pa.string()),
]), check_metadata=False)
assert isinstance(factory.inspect_schemas(), list)
assert isinstance(factory.finish(inspected_schema),
ds.FileSystemDataset)
assert factory.root_partition.equals(ds.scalar(True))
dataset = factory.finish()
assert isinstance(dataset, ds.FileSystemDataset)
assert len(list(dataset.scan())) == 2
scanner = ds.Scanner.from_dataset(dataset)
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
expected_str = pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, 3, 4], type=pa.int32()),
pa.array("0 1 2 3 4".split(), type=pa.string())
)
for task, group, key in zip(scanner.scan(), [1, 2], ['xxx', 'yyy']):
expected_group = pa.array([group] * 5, type=pa.int32())
expected_key = pa.array([key] * 5, type=pa.string())
expected_const = pa.array([group - 1] * 5, type=pa.int64())
for batch in task.execute():
assert batch.num_columns == 6
assert batch[0].equals(expected_i64)
assert batch[1].equals(expected_f64)
assert batch[2].equals(expected_str)
assert batch[3].equals(expected_const)
assert batch[4].equals(expected_group)
assert batch[5].equals(expected_key)
table = dataset.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
assert table.num_columns == 6
def test_make_fragment(multisourcefs):
parquet_format = ds.ParquetFileFormat()
dataset = ds.dataset('/plain', filesystem=multisourcefs,
format=parquet_format)
for path in dataset.files:
fragment = parquet_format.make_fragment(path, multisourcefs)
assert fragment.row_groups == [0]
row_group_fragment = parquet_format.make_fragment(path, multisourcefs,
row_groups=[0])
for f in [fragment, row_group_fragment]:
assert isinstance(f, ds.ParquetFileFragment)
assert f.path == path
assert isinstance(f.filesystem, type(multisourcefs))
assert row_group_fragment.row_groups == [0]
def test_make_csv_fragment_from_buffer():
content = textwrap.dedent("""
alpha,num,animal
a,12,dog
b,11,cat
c,10,rabbit
""")
buffer = pa.py_buffer(content.encode('utf-8'))
csv_format = ds.CsvFileFormat()
fragment = csv_format.make_fragment(buffer)
expected = pa.table([['a', 'b', 'c'],
[12, 11, 10],
['dog', 'cat', 'rabbit']],
names=['alpha', 'num', 'animal'])
assert fragment.to_table().equals(expected)
pickled = pickle.loads(pickle.dumps(fragment))
assert pickled.to_table().equals(fragment.to_table())
@pytest.mark.parquet
def test_make_parquet_fragment_from_buffer():
import pyarrow.parquet as pq
arrays = [
pa.array(['a', 'b', 'c']),
pa.array([12, 11, 10]),
pa.array(['dog', 'cat', 'rabbit'])
]
dictionary_arrays = [
arrays[0].dictionary_encode(),
arrays[1],
arrays[2].dictionary_encode()
]
dictionary_format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(
use_buffered_stream=True,
buffer_size=4096,
dictionary_columns=['alpha', 'animal']
)
)
cases = [
(arrays, ds.ParquetFileFormat()),
(dictionary_arrays, dictionary_format)
]
for arrays, format_ in cases:
table = pa.table(arrays, names=['alpha', 'num', 'animal'])
out = pa.BufferOutputStream()
pq.write_table(table, out)
buffer = out.getvalue()
fragment = format_.make_fragment(buffer)
assert fragment.to_table().equals(table)
pickled = pickle.loads(pickle.dumps(fragment))
assert pickled.to_table().equals(table)
def _create_dataset_for_fragments(tempdir, chunk_size=None, filesystem=None):
import pyarrow.parquet as pq
table = pa.table(
[range(8), [1] * 8, ['a'] * 4 + ['b'] * 4],
names=['f1', 'f2', 'part']
)
path = str(tempdir / "test_parquet_dataset")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path,
partition_cols=["part"], chunk_size=chunk_size)
dataset = ds.dataset(
path, format="parquet", partitioning="hive", filesystem=filesystem
)
return table, dataset
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir)
# list fragments
fragments = list(dataset.get_fragments())
assert len(fragments) == 2
f = fragments[0]
physical_names = ['f1', 'f2']
# file's schema does not include partition column
assert f.physical_schema.names == physical_names
assert f.format.inspect(f.path, f.filesystem) == f.physical_schema
assert f.partition_expression.equals(ds.field('part') == 'a')
# By default, the partition column is not part of the schema.
result = f.to_table()
assert result.column_names == physical_names
assert result.equals(table.remove_column(2).slice(0, 4))
# scanning fragment includes partition columns when given the proper
# schema.
result = f.to_table(schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert result.equals(table.slice(0, 4))
assert f.physical_schema == result.schema.remove(2)
# scanning fragments follow filter predicate
result = f.to_table(schema=dataset.schema, filter=ds.field('f1') < 2)
assert result.column_names == ['f1', 'f2', 'part']
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_implicit_cast(tempdir):
# ARROW-8693
import pyarrow.parquet as pq
table = pa.table([range(8), [1] * 4 + [2] * 4], names=['col', 'part'])
path = str(tempdir / "test_parquet_dataset")
pq.write_to_dataset(table, path, partition_cols=["part"])
part = ds.partitioning(pa.schema([('part', 'int8')]), flavor="hive")
dataset = ds.dataset(path, format="parquet", partitioning=part)
fragments = dataset.get_fragments(filter=ds.field("part") >= 2)
assert len(list(fragments)) == 1
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_reconstruct(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir)
def assert_yields_projected(fragment, row_slice,
columns=None, filter=None):
actual = fragment.to_table(
schema=table.schema, columns=columns, filter=filter)
column_names = columns if columns else table.column_names
assert actual.column_names == column_names
expected = table.slice(*row_slice).select(column_names)
assert actual.equals(expected)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert pickled_fragment.to_table() == fragment.to_table()
# manually re-construct a fragment, with explicit schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert new_fragment.to_table().equals(fragment.to_table())
assert_yields_projected(new_fragment, (0, 4))
# filter / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2), filter=ds.field('f1') < 2)
# filter requiring cast / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2),
columns=['f1'], filter=ds.field('f1') < 2.0)
# filter on the partition column
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 4),
filter=ds.field('part') == 'a')
# Fragments don't contain the partition's columns if not provided to the
# `to_table(schema=...)` method.
with pytest.raises(ValueError, match="Field named 'part' not found"):
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
new_fragment.to_table(filter=ds.field('part') == 'a')
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert len(row_group_fragments) == fragment.num_row_groups == 2
result = row_group_fragments[0].to_table(schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert len(result) == 2
assert result.equals(table.slice(0, 2))
assert row_group_fragments[0].row_groups is not None
assert row_group_fragments[0].num_row_groups == 1
assert row_group_fragments[0].row_groups[0].statistics == {
'f1': {'min': 0, 'max': 1},
'f2': {'min': 1, 'max': 1},
}
fragment = list(dataset.get_fragments(filter=ds.field('f1') < 1))[0]
row_group_fragments = list(fragment.split_by_row_group(ds.field('f1') < 1))
assert len(row_group_fragments) == 1
result = row_group_fragments[0].to_table(filter=ds.field('f1') < 1)
assert len(result) == 1
@pytest.mark.parquet
def test_fragments_parquet_num_row_groups(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': range(8)})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
original_fragment = list(dataset.get_fragments())[0]
# create fragment with subset of row groups
fragment = original_fragment.format.make_fragment(
original_fragment.path, original_fragment.filesystem,
row_groups=[1, 3])
assert fragment.num_row_groups == 2
# ensure that parsing metadata preserves correct number of row groups
fragment.ensure_complete_metadata()
assert fragment.num_row_groups == 2
assert len(fragment.row_groups) == 2
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_dictionary(tempdir):
import pandas as pd
df = pd.DataFrame(dict(col1=['a', 'b'], col2=[1, 2]))
df['col1'] = df['col1'].astype("category")
import pyarrow.parquet as pq
pq.write_table(pa.table(df), tempdir / "test_filter_dictionary.parquet")
import pyarrow.dataset as ds
dataset = ds.dataset(tempdir / 'test_filter_dictionary.parquet')
result = dataset.to_table(filter=ds.field("col1") == "a")
assert (df.iloc[0] == result.to_pandas()).all().all()
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_ensure_metadata(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
_, dataset = _create_dataset_for_fragments(
tempdir, chunk_size=2, filesystem=fs
)
fragment = list(dataset.get_fragments())[0]
# with default discovery, no metadata loaded
with assert_opens([fragment.path]):
fragment.ensure_complete_metadata()
assert fragment.row_groups == [0, 1]
# second time -> use cached / no file IO
with assert_opens([]):
fragment.ensure_complete_metadata()
# recreate fragment with row group ids
new_fragment = fragment.format.make_fragment(
fragment.path, fragment.filesystem, row_groups=[0, 1]
)
assert new_fragment.row_groups == fragment.row_groups
# collect metadata
new_fragment.ensure_complete_metadata()
row_group = new_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.num_rows == 2
assert row_group.statistics is not None
# pickling preserves row group ids
pickled_fragment = pickle.loads(pickle.dumps(new_fragment))
with assert_opens([fragment.path]):
assert pickled_fragment.row_groups == [0, 1]
row_group = pickled_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.statistics is not None
def _create_dataset_all_types(tempdir, chunk_size=None):
import pyarrow.parquet as pq
table = pa.table(
[
pa.array([True, None, False], pa.bool_()),
pa.array([1, 10, 42], pa.int8()),
pa.array([1, 10, 42], pa.uint8()),
pa.array([1, 10, 42], pa.int16()),
pa.array([1, 10, 42], pa.uint16()),
pa.array([1, 10, 42], pa.int32()),
pa.array([1, 10, 42], pa.uint32()),
pa.array([1, 10, 42], pa.int64()),
pa.array([1, 10, 42], pa.uint64()),
pa.array([1.0, 10.0, 42.0], pa.float32()),
pa.array([1.0, 10.0, 42.0], pa.float64()),
pa.array(['a', None, 'z'], pa.utf8()),
pa.array(['a', None, 'z'], pa.binary()),
pa.array([1, 10, 42], pa.timestamp('s')),
pa.array([1, 10, 42], pa.timestamp('ms')),
pa.array([1, 10, 42], pa.timestamp('us')),
pa.array([1, 10, 42], pa.date32()),
pa.array([1, 10, 4200000000], pa.date64()),
pa.array([1, 10, 42], pa.time32('s')),
pa.array([1, 10, 42], pa.time64('us')),
],
names=[
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'int64',
'uint64',
'float',
'double',
'utf8',
'binary',
'ts[s]',
'ts[ms]',
'ts[us]',
'date32',
'date64',
'time32',
'time64',
]
)
path = str(tempdir / "test_parquet_dataset_all_types")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path, chunk_size=chunk_size)
return table, ds.dataset(path, format="parquet", partitioning="hive")
@pytest.mark.pandas
@pytest.mark.parquet
def test_parquet_fragment_statistics(tempdir):
table, dataset = _create_dataset_all_types(tempdir)
fragment = list(dataset.get_fragments())[0]
import datetime
def dt_s(x): return datetime.datetime(1970, 1, 1, 0, 0, x)
def dt_ms(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x*1000)
def dt_us(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x)
date = datetime.date
time = datetime.time
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert row_group_fragments[0].row_groups is not None
row_group = row_group_fragments[0].row_groups[0]
assert row_group.num_rows == 3
assert row_group.total_byte_size > 1000
assert row_group.statistics == {
'boolean': {'min': False, 'max': True},
'int8': {'min': 1, 'max': 42},
'uint8': {'min': 1, 'max': 42},
'int16': {'min': 1, 'max': 42},
'uint16': {'min': 1, 'max': 42},
'int32': {'min': 1, 'max': 42},
'uint32': {'min': 1, 'max': 42},
'int64': {'min': 1, 'max': 42},
'uint64': {'min': 1, 'max': 42},
'float': {'min': 1.0, 'max': 42.0},
'double': {'min': 1.0, 'max': 42.0},
'utf8': {'min': 'a', 'max': 'z'},
'binary': {'min': b'a', 'max': b'z'},
'ts[s]': {'min': dt_s(1), 'max': dt_s(42)},
'ts[ms]': {'min': dt_ms(1), 'max': dt_ms(42)},
'ts[us]': {'min': dt_us(1), 'max': dt_us(42)},
'date32': {'min': date(1970, 1, 2), 'max': date(1970, 2, 12)},
'date64': {'min': date(1970, 1, 1), 'max': date(1970, 2, 18)},
'time32': {'min': time(0, 0, 1), 'max': time(0, 0, 42)},
'time64': {'min': time(0, 0, 0, 1), 'max': time(0, 0, 0, 42)},
}
@pytest.mark.parquet
def test_parquet_fragment_statistics_nulls(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': [0, 1, None, None], 'b': ['a', 'b', None, None]})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
fragments = list(dataset.get_fragments())[0].split_by_row_group()
# second row group has all nulls -> no statistics
assert fragments[1].row_groups[0].statistics == {}
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_predicate(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
assert fragment.partition_expression.equals(ds.field('part') == 'a')
# predicate may reference a partition field not present in the
# physical_schema if an explicit schema is provided to split_by_row_group
# filter matches partition_expression: all row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'a',
schema=dataset.schema))
assert len(row_group_fragments) == 2
# filter contradicts partition_expression: no row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'b',
schema=dataset.schema))
assert len(row_group_fragments) == 0
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_reconstruct(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
row_group_fragments = list(fragment.split_by_row_group())
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert pickled_fragment.to_table() == fragment.to_table()
# manually re-construct row group fragments
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups=[0])
result = new_fragment.to_table()
assert result.equals(row_group_fragments[0].to_table())
# manually re-construct a row group fragment with filter/column projection
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={1})
result = new_fragment.to_table(schema=table.schema, columns=['f1', 'part'],
filter=ds.field('f1') < 3, )
assert result.column_names == ['f1', 'part']
assert len(result) == 1
# out of bounds row group index
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={2})
with pytest.raises(IndexError, match="references row group 2"):
new_fragment.to_table()
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_ids(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with row group ids
subfrag = fragment.subset(row_group_ids=[0, 3])
with assert_opens([]):
assert subfrag.num_row_groups == 2
assert subfrag.row_groups == [0, 3]
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = subfrag.to_table()
assert result.to_pydict() == {"f1": [0, 3], "f2": [1, 1]}
# empty list of ids
subfrag = fragment.subset(row_group_ids=[])
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = subfrag.to_table(schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_filter(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with filter
subfrag = fragment.subset(ds.field("f1") >= 1)
with assert_opens([]):
assert subfrag.num_row_groups == 3
assert len(subfrag.row_groups) == 3
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = subfrag.to_table()
assert result.to_pydict() == {"f1": [1, 2, 3], "f2": [1, 1, 1]}
# filter that results in empty selection
subfrag = fragment.subset(ds.field("f1") > 5)
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = subfrag.to_table(schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
# passing schema to ensure filter on partition expression works
subfrag = fragment.subset(ds.field("part") == "a", schema=dataset.schema)
assert subfrag.num_row_groups == 4
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_subset_invalid(tempdir):
_, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1)
fragment = list(dataset.get_fragments())[0]
# passing none or both of filter / row_group_ids
with pytest.raises(ValueError):
fragment.subset(ds.field("f1") >= 1, row_group_ids=[1, 2])
with pytest.raises(ValueError):
fragment.subset()
def test_partitioning_factory(mockfs):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
partitioning_factory = ds.DirectoryPartitioning.discover(['group', 'key'])
assert isinstance(partitioning_factory, ds.PartitioningFactory)
options.partitioning_factory = partitioning_factory
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
# i64/f64 from data, group/key from "/1/xxx" and "/2/yyy" paths
expected_schema = pa.schema([
("i64", pa.int64()),
("f64", pa.float64()),
("str", pa.string()),
("const", pa.int64()),
("group", pa.int32()),
("key", pa.string()),
])
assert inspected_schema.equals(expected_schema)
hive_partitioning_factory = ds.HivePartitioning.discover()
assert isinstance(hive_partitioning_factory, ds.PartitioningFactory)
@pytest.mark.parametrize('infer_dictionary', [False, True])
def test_partitioning_factory_dictionary(mockfs, infer_dictionary):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning_factory = ds.DirectoryPartitioning.discover(
['group', 'key'], infer_dictionary=infer_dictionary)
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options)
inferred_schema = factory.inspect()
if infer_dictionary:
expected_type = pa.dictionary(pa.int32(), pa.string())
assert inferred_schema.field('key').type == expected_type
table = factory.finish().to_table().combine_chunks()
actual = table.column('key').chunk(0)
expected = pa.array(['xxx'] * 5 + ['yyy'] * 5).dictionary_encode()
assert actual.equals(expected)
# ARROW-9345 ensure filtering on the partition field works
table = factory.finish().to_table(filter=ds.field('key') == 'xxx')
actual = table.column('key').chunk(0)
expected = expected.slice(0, 5)
assert actual.equals(expected)
else:
assert inferred_schema.field('key').type == pa.string()
def test_partitioning_function():
schema = pa.schema([("year", pa.int16()), ("month", pa.int8())])
names = ["year", "month"]
# default DirectoryPartitioning
part = ds.partitioning(schema)
assert isinstance(part, ds.DirectoryPartitioning)
part = ds.partitioning(field_names=names)
assert isinstance(part, ds.PartitioningFactory)
# needs schema or list of names
with pytest.raises(ValueError):
ds.partitioning()
with pytest.raises(ValueError, match="Expected list"):
ds.partitioning(field_names=schema)
with pytest.raises(ValueError, match="Cannot specify both"):
ds.partitioning(schema, field_names=schema)
# Hive partitioning
part = ds.partitioning(schema, flavor="hive")
assert isinstance(part, ds.HivePartitioning)
part = ds.partitioning(flavor="hive")
assert isinstance(part, ds.PartitioningFactory)
# cannot pass list of names
with pytest.raises(ValueError):
ds.partitioning(names, flavor="hive")
with pytest.raises(ValueError, match="Cannot specify 'field_names'"):
ds.partitioning(field_names=names, flavor="hive")
# unsupported flavor
with pytest.raises(ValueError):
ds.partitioning(schema, flavor="unsupported")
def _create_single_file(base_dir, table=None, row_group_size=None):
import pyarrow.parquet as pq
if table is None:
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = base_dir / "test.parquet"
pq.write_table(table, path, row_group_size=row_group_size)
return table, path
def _create_directory_of_files(base_dir):
import pyarrow.parquet as pq
table1 = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path1 = base_dir / "test1.parquet"
pq.write_table(table1, path1)
table2 = pa.table({'a': range(9, 18), 'b': [0.] * 4 + [1.] * 5})
path2 = base_dir / "test2.parquet"
pq.write_table(table2, path2)
return (table1, table2), (path1, path2)
def _check_dataset(dataset, table):
# also test that pickle roundtrip keeps the functionality
for d in [dataset, pickle.loads(pickle.dumps(dataset))]:
        assert d.schema.equals(table.schema)
        assert d.to_table().equals(table)
def _check_dataset_from_path(path, table, **kwargs):
# pathlib object
assert isinstance(path, pathlib.Path)
# accept Path, str, List[Path], List[str]
for p in [path, str(path), [path], [str(path)]]:
dataset = ds.dataset(path, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table)
# relative string path
with change_cwd(path.parent):
dataset = ds.dataset(path.name, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table)
@pytest.mark.parquet
def test_open_dataset_single_file(tempdir):
table, path = _create_single_file(tempdir)
_check_dataset_from_path(path, table)
@pytest.mark.parquet
def test_deterministic_row_order(tempdir):
# ARROW-8447 Ensure that dataset.to_table (and Scanner::ToTable) returns a
# deterministic row ordering. This is achieved by constructing a single
# parquet file with one row per RowGroup.
table, path = _create_single_file(tempdir, row_group_size=1)
_check_dataset_from_path(path, table)
@pytest.mark.parquet
def test_open_dataset_directory(tempdir):
tables, _ = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
_check_dataset_from_path(tempdir, table)
@pytest.mark.parquet
def test_open_dataset_list_of_files(tempdir):
tables, (path1, path2) = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
datasets = [
ds.dataset([path1, path2]),
ds.dataset([str(path1), str(path2)])
]
datasets += [
pickle.loads(pickle.dumps(d)) for d in datasets
]
for dataset in datasets:
assert dataset.schema.equals(table.schema)
result = dataset.to_table()
assert result.equals(table)
def test_construct_from_single_file(tempdir):
directory = tempdir / 'single-file'
directory.mkdir()
table, path = _create_single_file(directory)
relative_path = path.relative_to(directory)
# instantiate from a single file
d1 = ds.dataset(path)
# instantiate from a single file with a filesystem object
d2 = ds.dataset(path, filesystem=fs.LocalFileSystem())
# instantiate from a single file with prefixed filesystem URI
d3 = ds.dataset(relative_path, filesystem=_filesystem_uri(directory))
# pickle roundtrip
d4 = pickle.loads(pickle.dumps(d1))
assert d1.to_table() == d2.to_table() == d3.to_table() == d4.to_table()
def test_construct_from_single_directory(tempdir):
directory = tempdir / 'single-directory'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
d1 = ds.dataset(directory)
d2 = ds.dataset(directory, filesystem=fs.LocalFileSystem())
d3 = ds.dataset(directory.name, filesystem=_filesystem_uri(tempdir))
t1 = d1.to_table()
t2 = d2.to_table()
t3 = d3.to_table()
assert t1 == t2 == t3
# test pickle roundtrip
for d in [d1, d2, d3]:
restored = pickle.loads(pickle.dumps(d))
assert restored.to_table() == t1
def test_construct_from_list_of_files(tempdir):
# instantiate from a list of files
directory = tempdir / 'list-of-files'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
relative_paths = [p.relative_to(tempdir) for p in paths]
with change_cwd(tempdir):
d1 = ds.dataset(relative_paths)
t1 = d1.to_table()
assert len(t1) == sum(map(len, tables))
d2 = ds.dataset(relative_paths, filesystem=_filesystem_uri(tempdir))
t2 = d2.to_table()
d3 = ds.dataset(paths)
t3 = d3.to_table()
d4 = ds.dataset(paths, filesystem=fs.LocalFileSystem())
t4 = d4.to_table()
assert t1 == t2 == t3 == t4
def test_construct_from_list_of_mixed_paths_fails(mockfs):
    # instantiate from a list of mixed paths
files = [
'subdir/1/xxx/file0.parquet',
'subdir/1/xxx/doesnt-exist.parquet',
]
with pytest.raises(FileNotFoundError, match='doesnt-exist'):
ds.dataset(files, filesystem=mockfs)
def test_construct_from_mixed_child_datasets(mockfs):
    # instantiate from a list of child datasets
a = ds.dataset(['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet'], filesystem=mockfs)
b = ds.dataset('subdir', filesystem=mockfs)
dataset = ds.dataset([a, b])
assert isinstance(dataset, ds.UnionDataset)
assert len(list(dataset.get_fragments())) == 4
table = dataset.to_table()
assert len(table) == 20
assert table.num_columns == 4
assert len(dataset.children) == 2
for child in dataset.children:
assert child.files == ['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet']
def test_construct_empty_dataset():
empty = ds.dataset([])
table = empty.to_table()
assert table.num_rows == 0
assert table.num_columns == 0
empty = ds.dataset([], schema=pa.schema([
('a', pa.int64()),
('a', pa.string())
]))
table = empty.to_table()
assert table.num_rows == 0
assert table.num_columns == 2
def test_construct_from_invalid_sources_raise(multisourcefs):
child1 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
child2 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/schema'),
format=ds.ParquetFileFormat()
)
with pytest.raises(TypeError, match='Expected.*FileSystemDatasetFactory'):
ds.dataset([child1, child2])
expected = (
"Expected a list of path-like or dataset objects. The given list "
"contains the following types: int"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([1, 2, 3])
expected = (
"Expected a path-like, list of path-likes or a list of Datasets "
"instead of the given type: NoneType"
)
with pytest.raises(TypeError, match=expected):
ds.dataset(None)
@pytest.mark.parquet
def test_open_dataset_partitioned_directory(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = tempdir / "dataset"
path.mkdir()
for part in range(3):
part = path / "part={}".format(part)
part.mkdir()
pq.write_table(table, part / "test.parquet")
# no partitioning specified, just read all individual files
full_table = pa.concat_tables([table] * 3)
_check_dataset_from_path(path, full_table)
# specify partition scheme with discovery
dataset = ds.dataset(
str(path), partitioning=ds.partitioning(flavor="hive"))
expected_schema = table.schema.append(pa.field("part", pa.int32()))
assert dataset.schema.equals(expected_schema)
# specify partition scheme with discovery and relative path
with change_cwd(tempdir):
dataset = ds.dataset(
"dataset/", partitioning=ds.partitioning(flavor="hive"))
expected_schema = table.schema.append(pa.field("part", pa.int32()))
assert dataset.schema.equals(expected_schema)
# specify partition scheme with string short-cut
dataset = ds.dataset(str(path), partitioning="hive")
assert dataset.schema.equals(expected_schema)
# specify partition scheme with explicit scheme
dataset = ds.dataset(
str(path),
partitioning=ds.partitioning(
pa.schema([("part", pa.int8())]), flavor="hive"))
expected_schema = table.schema.append(pa.field("part", pa.int8()))
assert dataset.schema.equals(expected_schema)
result = dataset.to_table()
expected = full_table.append_column(
"part", pa.array(np.repeat([0, 1, 2], 9), type=pa.int8()))
assert result.equals(expected)
@pytest.mark.parquet
def test_open_dataset_filesystem(tempdir):
# single file
table, path = _create_single_file(tempdir)
# filesystem inferred from path
dataset1 = ds.dataset(str(path))
assert dataset1.schema.equals(table.schema)
# filesystem specified
dataset2 = ds.dataset(str(path), filesystem=fs.LocalFileSystem())
assert dataset2.schema.equals(table.schema)
# local filesystem specified with relative path
with change_cwd(tempdir):
dataset3 = ds.dataset("test.parquet", filesystem=fs.LocalFileSystem())
assert dataset3.schema.equals(table.schema)
# passing different filesystem
with pytest.raises(FileNotFoundError):
ds.dataset(str(path), filesystem=fs._MockFileSystem())
@pytest.mark.parquet
def test_open_dataset_unsupported_format(tempdir):
_, path = _create_single_file(tempdir)
with pytest.raises(ValueError, match="format 'blabla' is not supported"):
ds.dataset([path], format="blabla")
@pytest.mark.parquet
def test_open_union_dataset(tempdir):
_, path = _create_single_file(tempdir)
dataset = ds.dataset(path)
union = ds.dataset([dataset, dataset])
assert isinstance(union, ds.UnionDataset)
pickled = pickle.loads(pickle.dumps(union))
assert pickled.to_table() == union.to_table()
def test_open_union_dataset_with_additional_kwargs(multisourcefs):
child = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
with pytest.raises(ValueError, match="cannot pass any additional"):
ds.dataset([child], format="parquet")
def test_open_dataset_non_existing_file():
# ARROW-8213: Opening a dataset with a local incorrect path gives confusing
# error message
with pytest.raises(FileNotFoundError):
ds.dataset('i-am-not-existing.parquet', format='parquet')
with pytest.raises(pa.ArrowInvalid, match='cannot be relative'):
ds.dataset('file:i-am-not-existing.parquet', format='parquet')
@pytest.mark.parquet
@pytest.mark.parametrize('partitioning', ["directory", "hive"])
@pytest.mark.parametrize('partition_keys', [
(["A", "B", "C"], [1, 2, 3]),
([1, 2, 3], ["A", "B", "C"]),
(["A", "B", "C"], ["D", "E", "F"]),
([1, 2, 3], [4, 5, 6]),
])
def test_open_dataset_partitioned_dictionary_type(tempdir, partitioning,
partition_keys):
# ARROW-9288 / ARROW-9476
import pyarrow.parquet as pq
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
if partitioning == "directory":
partitioning = ds.DirectoryPartitioning.discover(
["part1", "part2"], infer_dictionary=True)
fmt = "{0}/{1}"
else:
partitioning = ds.HivePartitioning.discover(infer_dictionary=True)
fmt = "part1={0}/part2={1}"
basepath = tempdir / "dataset"
basepath.mkdir()
part_keys1, part_keys2 = partition_keys
for part1 in part_keys1:
for part2 in part_keys2:
path = basepath / fmt.format(part1, part2)
path.mkdir(parents=True)
pq.write_table(table, path / "test.parquet")
dataset = ds.dataset(str(basepath), partitioning=partitioning)
def dict_type(key):
value_type = pa.string() if isinstance(key, str) else pa.int32()
return pa.dictionary(pa.int32(), value_type)
expected_schema = table.schema.append(
pa.field("part1", dict_type(part_keys1[0]))
).append(
pa.field("part2", dict_type(part_keys2[0]))
)
assert dataset.schema.equals(expected_schema)
@pytest.fixture
def s3_example_simple(s3_connection, s3_server):
from pyarrow.fs import FileSystem
import pyarrow.parquet as pq
host, port, access_key, secret_key = s3_connection
uri = (
"s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}"
.format(access_key, secret_key, host, port)
)
fs, path = FileSystem.from_uri(uri)
fs.create_dir("mybucket")
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream("mybucket/data.parquet") as out:
pq.write_table(table, out)
return table, path, fs, uri, host, port, access_key, secret_key
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_uri_s3(s3_example_simple):
# open dataset from non-localfs string path
table, path, fs, uri, _, _, _, _ = s3_example_simple
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset.to_table().equals(table)
# passing filesystem object
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
@pytest.mark.parquet
@pytest.mark.s3 # still needed to create the data
def test_open_dataset_from_uri_s3_fsspec(s3_example_simple):
table, path, _, _, host, port, access_key, secret_key = s3_example_simple
s3fs = pytest.importorskip("s3fs")
from pyarrow.fs import PyFileSystem, FSSpecHandler
fs = s3fs.S3FileSystem(
key=access_key,
secret=secret_key,
client_kwargs={
'endpoint_url': 'http://{}:{}'.format(host, port)
}
)
# passing as fsspec filesystem
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
# directly passing the fsspec-handler
fs = PyFileSystem(FSSpecHandler(fs))
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_s3_with_filesystem_uri(s3_connection, s3_server):
from pyarrow.fs import FileSystem
import pyarrow.parquet as pq
host, port, access_key, secret_key = s3_connection
bucket = 'theirbucket'
path = 'nested/folder/data.parquet'
uri = "s3://{}:{}@{}/{}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, bucket, path, host, port
)
fs, path = FileSystem.from_uri(uri)
assert path == 'theirbucket/nested/folder/data.parquet'
fs.create_dir(bucket)
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream(path) as out:
pq.write_table(table, out)
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset.to_table().equals(table)
# passing filesystem as an uri
template = (
"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, host, port
)
)
cases = [
('theirbucket/nested/folder/', '/data.parquet'),
('theirbucket/nested/folder', 'data.parquet'),
('theirbucket/nested/', 'folder/data.parquet'),
('theirbucket/nested', 'folder/data.parquet'),
('theirbucket', '/nested/folder/data.parquet'),
('theirbucket', 'nested/folder/data.parquet'),
]
for prefix, path in cases:
uri = template.format(prefix)
dataset = ds.dataset(path, filesystem=uri, format="parquet")
assert dataset.to_table().equals(table)
with pytest.raises(pa.ArrowInvalid, match='Missing bucket name'):
uri = template.format('/')
ds.dataset('/theirbucket/nested/folder/data.parquet', filesystem=uri)
error = (
"The path component of the filesystem URI must point to a directory "
"but it has a type: `{}`. The path component is `{}` and the given "
"filesystem URI is `{}`"
)
path = 'theirbucket/doesnt/exist'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('NotFound', path, uri)
path = 'theirbucket/nested/folder/data.parquet'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('File', path, uri)
@pytest.mark.parquet
def test_open_dataset_from_fsspec(tempdir):
table, path = _create_single_file(tempdir)
fsspec = pytest.importorskip("fsspec")
localfs = fsspec.filesystem("file")
dataset = ds.dataset(path, filesystem=localfs)
assert dataset.schema.equals(table.schema)
@pytest.mark.parquet
def test_filter_implicit_cast(tempdir):
# ARROW-7652
table = pa.table({'a': pa.array([0, 1, 2, 3, 4, 5], type=pa.int8())})
_, path = _create_single_file(tempdir, table)
dataset = ds.dataset(str(path))
filter_ = ds.field('a') > 2
assert len(dataset.to_table(filter=filter_)) == 3
def test_dataset_union(multisourcefs):
child = ds.FileSystemDatasetFactory(
multisourcefs, fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
factory = ds.UnionDatasetFactory([child])
# TODO(bkietz) reintroduce factory.children property
assert len(factory.inspect_schemas()) == 1
assert all(isinstance(s, pa.Schema) for s in factory.inspect_schemas())
assert factory.inspect_schemas()[0].equals(child.inspect())
assert factory.inspect().equals(child.inspect())
assert isinstance(factory.finish(), ds.Dataset)
def test_union_dataset_from_other_datasets(tempdir, multisourcefs):
child1 = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
child2 = ds.dataset('/schema', filesystem=multisourcefs, format='parquet',
partitioning=['week', 'color'])
child3 = ds.dataset('/hive', filesystem=multisourcefs, format='parquet',
partitioning='hive')
assert child1.schema != child2.schema != child3.schema
assembled = ds.dataset([child1, child2, child3])
assert isinstance(assembled, ds.UnionDataset)
msg = 'cannot pass any additional arguments'
with pytest.raises(ValueError, match=msg):
ds.dataset([child1, child2], filesystem=multisourcefs)
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('week', pa.int32()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
assembled = ds.dataset([child1, child3])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('date', pa.date32()),
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('unknown', pa.string()) # fill with nulls
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
# incompatible schemas, date and index columns have conflicting types
table = pa.table([range(9), [0.] * 4 + [1.] * 5, 'abcdefghj'],
names=['date', 'value', 'index'])
_, path = _create_single_file(tempdir, table=table)
child4 = ds.dataset(path)
with pytest.raises(pa.ArrowInvalid, match='Unable to merge'):
ds.dataset([child1, child4])
def test_dataset_from_a_list_of_local_directories_raises(multisourcefs):
msg = 'points to a directory, but only file paths are supported'
with pytest.raises(IsADirectoryError, match=msg):
ds.dataset(['/plain', '/schema', '/hive'], filesystem=multisourcefs)
def test_union_dataset_filesystem_datasets(multisourcefs):
# without partitioning
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs),
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
])
assert dataset.schema.equals(expected_schema)
# with hive partitioning for two hive sources
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs, partitioning='hive')
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert dataset.schema.equals(expected_schema)
@pytest.mark.parquet
def test_specified_schema(tempdir):
import pyarrow.parquet as pq
table = pa.table({'a': [1, 2, 3], 'b': [.1, .2, .3]})
pq.write_table(table, tempdir / "data.parquet")
def _check_dataset(schema, expected, expected_schema=None):
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
if expected_schema is not None:
assert dataset.schema.equals(expected_schema)
else:
assert dataset.schema.equals(schema)
result = dataset.to_table()
assert result.equals(expected)
# no schema specified
schema = None
expected = table
_check_dataset(schema, expected, expected_schema=table.schema)
# identical schema specified
schema = table.schema
expected = table
_check_dataset(schema, expected)
# Specifying schema with change column order
schema = pa.schema([('b', 'float64'), ('a', 'int64')])
expected = pa.table([[.1, .2, .3], [1, 2, 3]], names=['b', 'a'])
_check_dataset(schema, expected)
# Specifying schema with missing column
schema = pa.schema([('a', 'int64')])
expected = pa.table([[1, 2, 3]], names=['a'])
_check_dataset(schema, expected)
# Specifying schema with additional column
schema = pa.schema([('a', 'int64'), ('c', 'int32')])
expected = pa.table([[1, 2, 3],
pa.array([None, None, None], type='int32')],
names=['a', 'c'])
_check_dataset(schema, expected)
# Specifying with incompatible schema
schema = pa.schema([('a', 'int32'), ('b', 'float64')])
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
assert dataset.schema.equals(schema)
with pytest.raises(TypeError):
dataset.to_table()
def test_ipc_format(tempdir):
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.arrow')
with pa.output_stream(path) as sink:
writer = pa.RecordBatchFileWriter(sink, table.schema)
writer.write_batch(table.to_batches()[0])
writer.close()
dataset = ds.dataset(path, format=ds.IpcFileFormat())
result = dataset.to_table()
assert result.equals(table)
for format_str in ["ipc", "arrow"]:
dataset = ds.dataset(path, format=format_str)
result = dataset.to_table()
assert result.equals(table)
@pytest.mark.pandas
def test_csv_format(tempdir):
table = pa.table({'a': pa.array([1, 2, 3], type="int64"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.csv')
table.to_pandas().to_csv(path, index=False)
dataset = ds.dataset(path, format=ds.CsvFileFormat())
result = dataset.to_table()
assert result.equals(table)
dataset = ds.dataset(path, format='csv')
result = dataset.to_table()
assert result.equals(table)
def test_feather_format(tempdir):
from pyarrow.feather import write_feather
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
basedir = tempdir / "feather_dataset"
basedir.mkdir()
write_feather(table, str(basedir / "data.feather"))
dataset = ds.dataset(basedir, format=ds.IpcFileFormat())
result = dataset.to_table()
assert result.equals(table)
dataset = ds.dataset(basedir, format="feather")
result = dataset.to_table()
assert result.equals(table)
# ARROW-8641 - column selection order
result = dataset.to_table(columns=["b", "a"])
assert result.column_names == ["b", "a"]
result = dataset.to_table(columns=["a", "a"])
assert result.column_names == ["a", "a"]
# error with Feather v1 files
write_feather(table, str(basedir / "data1.feather"), version=1)
with pytest.raises(ValueError):
ds.dataset(basedir, format="feather").to_table()
def _create_parquet_dataset_simple(root_path):
import pyarrow.parquet as pq
metadata_collector = []
for i in range(4):
table = pa.table({'f1': [i] * 10, 'f2': np.random.randn(10)})
pq.write_to_dataset(
table, str(root_path), metadata_collector=metadata_collector
)
metadata_path = str(root_path / '_metadata')
# write _metadata file
pq.write_metadata(
table.schema, metadata_path,
metadata_collector=metadata_collector
)
return metadata_path, table
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
def test_parquet_dataset_factory(tempdir):
root_path = tempdir / "test_parquet_dataset"
metadata_path, table = _create_parquet_dataset_simple(root_path)
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
result = dataset.to_table()
assert result.num_rows == 40
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_invalid(tempdir):
root_path = tempdir / "test_parquet_dataset_invalid"
metadata_path, table = _create_parquet_dataset_simple(root_path)
# remove one of the files
list(root_path.glob("*.parquet"))[0].unlink()
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
with pytest.raises(FileNotFoundError):
dataset.to_table()
def _create_metadata_file(root_path):
# create _metadata file from existing parquet dataset
import pyarrow.parquet as pq
parquet_paths = list(sorted(root_path.rglob("*.parquet")))
schema = pq.ParquetFile(parquet_paths[0]).schema.to_arrow_schema()
metadata_collector = []
for path in parquet_paths:
metadata = pq.ParquetFile(path).metadata
metadata.set_file_path(str(path.relative_to(root_path)))
metadata_collector.append(metadata)
metadata_path = root_path / "_metadata"
pq.write_metadata(
schema, metadata_path, metadata_collector=metadata_collector
)
return metadata_path
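# The _metadata file written above aggregates the per-file row-group metadata,
# so ds.parquet_dataset(metadata_path) can discover every fragment and its
# statistics by opening only this single file; see
# test_parquet_dataset_lazy_filtering below, which asserts exactly that.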
def _create_parquet_dataset_partitioned(root_path):
import pyarrow.parquet as pq
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
table = table.replace_schema_metadata({"key": "value"})
pq.write_to_dataset(table, str(root_path), partition_cols=['part'])
return _create_metadata_file(root_path), table
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_partitioned(tempdir):
root_path = tempdir / "test_parquet_dataset_factory_partitioned"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.parquet_dataset(metadata_path, partitioning=partitioning)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 2
result = dataset.to_table()
assert result.num_rows == 20
# the partitioned dataset does not preserve order
result = result.to_pandas().sort_values("f1").reset_index(drop=True)
expected = table.to_pandas()
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_metadata(tempdir):
# ensure ParquetDatasetFactory preserves metadata (ARROW-9363)
root_path = tempdir / "test_parquet_dataset_factory_metadata"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
dataset = ds.parquet_dataset(metadata_path, partitioning="hive")
assert dataset.schema.equals(table.schema)
assert b"key" in dataset.schema.metadata
fragments = list(dataset.get_fragments())
assert b"key" in fragments[0].physical_schema.metadata
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_lazy_filtering(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
# Test to ensure that no IO happens when filtering a dataset
# created with ParquetDatasetFactory from a _metadata file
root_path = tempdir / "test_parquet_dataset_lazy_filtering"
metadata_path, _ = _create_parquet_dataset_simple(root_path)
# creating the dataset should only open the metadata file
with assert_opens([metadata_path]):
dataset = ds.parquet_dataset(
metadata_path,
partitioning=ds.partitioning(flavor="hive"),
filesystem=fs)
# materializing fragments should not open any file
with assert_opens([]):
fragments = list(dataset.get_fragments())
# filtering fragments should not open any file
with assert_opens([]):
list(dataset.get_fragments(ds.field("f1") > 15))
# splitting by row group should still not open any file
with assert_opens([]):
fragments[0].split_by_row_group(ds.field("f1") > 15)
    # ensuring metadata of a split fragment should also not open any file
with assert_opens([]):
rg_fragments = fragments[0].split_by_row_group()
rg_fragments[0].ensure_complete_metadata()
# FIXME(bkietz) on Windows this results in FileNotFoundErrors.
# but actually scanning does open files
# with assert_opens([f.path for f in fragments]):
# dataset.to_table()
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_schema_metadata(tempdir):
# ARROW-8802
df = pd.DataFrame({'a': [1, 2, 3]})
path = tempdir / "test.parquet"
df.to_parquet(path)
dataset = ds.dataset(path)
schema = dataset.to_table().schema
projected_schema = dataset.to_table(columns=["a"]).schema
# ensure the pandas metadata is included in the schema
assert b"pandas" in schema.metadata
# ensure it is still there in a projected schema (with column selection)
assert schema.equals(projected_schema, check_metadata=True)
@pytest.mark.parquet
def test_filter_mismatching_schema(tempdir):
# ARROW-9146
import pyarrow.parquet as pq
table = pa.table({"col": pa.array([1, 2, 3, 4], type='int32')})
pq.write_table(table, str(tempdir / "data.parquet"))
# specifying explicit schema, but that mismatches the schema of the data
schema = pa.schema([("col", pa.int64())])
dataset = ds.dataset(
tempdir / "data.parquet", format="parquet", schema=schema)
# filtering on a column with such type mismatch should give a proper error
with pytest.raises(TypeError):
dataset.to_table(filter=ds.field("col") > 2)
fragment = list(dataset.get_fragments())[0]
with pytest.raises(TypeError):
fragment.to_table(filter=ds.field("col") > 2, schema=schema)
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_only_partition_columns(tempdir):
# ARROW-8729
import pyarrow.parquet as pq
table = pa.table({'part': 'a a b b'.split(), 'col': list(range(4))})
path = str(tempdir / 'test_dataset')
pq.write_to_dataset(table, path, partition_cols=['part'])
dataset = ds.dataset(path, partitioning='hive')
all_cols = dataset.to_table(use_threads=False)
part_only = dataset.to_table(columns=['part'], use_threads=False)
assert all_cols.column('part').equals(part_only.column('part'))
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_null_column(tempdir):
import pandas as pd
df = pd.DataFrame({"col": np.array([None, None, None], dtype='object')})
f = tempdir / "test_dataset_project_null_column.parquet"
df.to_parquet(f, engine="pyarrow")
dataset = ds.dataset(f, format="parquet",
schema=pa.schema([("col", pa.int64())]))
expected = pa.table({'col': pa.array([None, None, None], pa.int64())})
assert dataset.to_table().equals(expected)
def _check_dataset_roundtrip(dataset, base_dir, expected_files,
base_dir_path=None, partitioning=None):
base_dir_path = base_dir_path or base_dir
ds.write_dataset(dataset, base_dir, format="feather",
partitioning=partitioning, use_threads=False)
# check that all files are present
file_paths = list(base_dir_path.rglob("*"))
assert set(file_paths) == set(expected_files)
# check that reading back in as dataset gives the same result
dataset2 = ds.dataset(
base_dir_path, format="feather", partitioning=partitioning)
assert dataset2.to_table().equals(dataset.to_table())
@pytest.mark.parquet
def test_write_dataset(tempdir):
# manually create a written dataset and read as dataset object
directory = tempdir / 'single-file'
directory.mkdir()
_ = _create_single_file(directory)
dataset = ds.dataset(directory)
# full string path
target = tempdir / 'single-file-target'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, str(target), expected_files, target)
# pathlib path object
target = tempdir / 'single-file-target2'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, target, expected_files, target)
# TODO
# # relative path
# target = tempdir / 'single-file-target3'
# expected_files = [target / "part-0.ipc"]
# _check_dataset_roundtrip(
# dataset, './single-file-target3', expected_files, target)
# Directory of files
directory = tempdir / 'single-directory'
directory.mkdir()
_ = _create_directory_of_files(directory)
dataset = ds.dataset(directory)
target = tempdir / 'single-directory-target'
expected_files = [target / "part-0.feather"]
_check_dataset_roundtrip(dataset, str(target), expected_files, target)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_partitioned(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.dataset(directory, partitioning=partitioning)
# hive partitioning
target = tempdir / 'partitioned-hive-target'
expected_paths = [
target / "part=a", target / "part=a" / "part-0.feather",
target / "part=b", target / "part=b" / "part-1.feather"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
_check_dataset_roundtrip(
dataset, str(target), expected_paths, target,
partitioning=partitioning_schema)
# directory partitioning
target = tempdir / 'partitioned-dir-target'
expected_paths = [
target / "a", target / "a" / "part-0.feather",
target / "b", target / "b" / "part-1.feather"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]))
_check_dataset_roundtrip(
dataset, str(target), expected_paths, target,
partitioning=partitioning_schema)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_use_threads(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
dataset = ds.dataset(directory, partitioning="hive")
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
target1 = tempdir / 'partitioned1'
ds.write_dataset(
dataset, target1, format="feather", partitioning=partitioning,
use_threads=True
)
target2 = tempdir / 'partitioned2'
ds.write_dataset(
dataset, target2, format="feather", partitioning=partitioning,
use_threads=False
)
# check that reading in gives same result
result1 = ds.dataset(target1, format="feather", partitioning=partitioning)
result2 = ds.dataset(target2, format="feather", partitioning=partitioning)
assert result1.to_table().equals(result2.to_table())
def test_write_table(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir,
basename_template='dat_{i}.arrow', format="feather")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "dat_0.arrow"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
# with partitioning
base_dir = tempdir / 'partitioned'
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
ds.write_dataset(table, base_dir, format="feather",
basename_template='dat_{i}.arrow',
partitioning=partitioning)
file_paths = list(base_dir.rglob("*"))
expected_paths = [
base_dir / "part=a", base_dir / "part=a" / "dat_0.arrow",
base_dir / "part=b", base_dir / "part=b" / "dat_1.arrow"
]
assert set(file_paths) == set(expected_paths)
result = ds.dataset(base_dir, format="ipc", partitioning=partitioning)
assert result.to_table().equals(table)
def test_write_table_multiple_fragments(tempdir):
table = pa.table([
pa.array(range(10)), pa.array(np.random.randn(10)),
pa.array(np.repeat(['a', 'b'], 5))
], names=["f1", "f2", "part"])
table = pa.concat_tables([table]*2)
# Table with multiple batches written as single Fragment by default
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Same for single-element list of Table
base_dir = tempdir / 'single-list'
ds.write_dataset([table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of batches to write multiple fragments
base_dir = tempdir / 'multiple'
ds.write_dataset(table.to_batches(), base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of tables to write multiple fragments
base_dir = tempdir / 'multiple-table'
ds.write_dataset([table, table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(
pa.concat_tables([table]*2)
)
@pytest.mark.parquet
def test_write_dataset_parquet(tempdir):
import pyarrow.parquet as pq
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
# using default "parquet" format string
base_dir = tempdir / 'parquet_dataset'
ds.write_dataset(table, base_dir, format="parquet")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "part-0.parquet"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="parquet").to_table()
assert result.equals(table)
# using custom options
for version in ["1.0", "2.0"]:
format = ds.ParquetFileFormat()
opts = format.make_write_options(version=version)
base_dir = tempdir / 'parquet_dataset_version{0}'.format(version)
ds.write_dataset(table, base_dir, format=format, file_options=opts)
meta = pq.read_metadata(base_dir / "part-0.parquet")
assert meta.format_version == version
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_arrow_schema_metadata(tempdir):
# ensure we serialize ARROW schema in the parquet metadata, to have a
# correct roundtrip (e.g. preserve non-UTC timezone)
import pyarrow.parquet as pq
table = pa.table({"a": [pd.Timestamp("2012-01-01", tz="Europe/Brussels")]})
assert table["a"].type.tz == "Europe/Brussels"
ds.write_dataset(table, tempdir, format="parquet")
result = pq.read_table(tempdir / "part-0.parquet")
assert result["a"].type.tz == "Europe/Brussels"
def test_write_dataset_schema_metadata(tempdir):
# ensure that schema metadata gets written
from pyarrow import feather
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="feather")
schema = feather.read_table(tempdir / "part-0.feather").schema
assert schema.metadata == {b'key': b'value'}
@pytest.mark.parquet
def test_write_dataset_schema_metadata_parquet(tempdir):
# ensure that schema metadata gets written
import pyarrow.parquet as pq
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="parquet")
schema = pq.read_table(tempdir / "part-0.parquet").schema
assert schema.metadata == {b'key': b'value'}
| apache-2.0 |
hugobowne/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, including the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
rudhir-upretee/Sumo17_With_Netsim | tools/net/netstats.py | 1 | 1983 | #!/usr/bin/env python
"""
@file netstats.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-08-13
@version $Id: netstats.py 13811 2013-05-01 20:31:43Z behrisch $
Prints some information about a given network
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
def renderHTML(values):
print "<html><body>"
print "<h1>" + values["netname"] + "</h1></br>"
# network
print "<h2>Network</h2></br>"
# edges
print "<h2>Edges</h2></br>"
print "Edge number: " + str(values["edgeNumber"]) + "</br>"
print "Edgelength sum: " + str(values["edgeLengthSum"]) + "</br>"
print "Lanelength sum: " + str(values["laneLengthSum"]) + "</br>"
# nodes
print "<h2>Nodes</h2></br>"
print "Node number: " + str(values["nodeNumber"]) + "</br>"
print "</body></html>"
def renderPNG(values):
from matplotlib import rcParams
from pylab import *
bar([0], [values["edgeNumber"]], 1, color='r')
show()
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <net>"
sys.exit()
print "Reading net..."
net = sumolib.net.readNet(sys.argv[1])
values = {}
values["netname"] = "hallo"
values["edgesPerLaneNumber"] = {}
values["edgeLengthSum"] = 0
values["laneLengthSum"] = 0
values["edgeNumber"] = len(net._edges)
values["nodeNumber"] = len(net._nodes)
for e in net._edges:
values["edgeLengthSum"] = values["edgeLengthSum"] + e._length
values["laneLengthSum"] = values["laneLengthSum"] + (e._length * float(len(e._lanes)))
if len(e._lanes) not in values["edgesPerLaneNumber"]:
values["edgesPerLaneNumber"][len(e._lanes)] = 0
values["edgesPerLaneNumber"][len(e._lanes)] = values["edgesPerLaneNumber"][len(e._lanes)] + 1
renderHTML(values)
renderPNG(values)
| gpl-3.0 |
grocsvs/grocsvs | src/grocsvs/utilities.py | 1 | 6754 | import collections
import errno
import h5py
import os
import numpy
import pandas as pandas
import pyfaidx
import re
import string
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
assert pickle.HIGHEST_PROTOCOL >= 2, \
"A relatively recent version of pickle is required"
class BinaryNotFoundError(Exception):
pass
def ensure_dir(directory):
try:
os.makedirs(directory)
except OSError as err:
if err.errno != errno.EEXIST:
raise
class cd:
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def check_memory(logger, min_memory=16):
try:
import psutil
physical_mem_gb = psutil.virtual_memory().total / (1000.**3)
if physical_mem_gb < min_memory:
logger.log("WARNING: GROC-SVs typically requires ~16 GB of memory to run; "
"you appear to have only {:.1f}GB".format(physical_mem_gb))
except:
pass
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
def get_key(options_dict, key, type_=basestring, default="error", error_msg="configuration"):
if default == "error" and key not in options_dict:
print "CONFIG ERROR: {} key '{}' is missing".format(error_msg, key)
sys.exit(1)
value = options_dict.get(key, default)
if type_ is not None and not isinstance(value, type_):
print "CONFIG ERROR: {} key '{}' should be type '{}', not '{}'".format(
error_msg, key, type_.__name__, type(value).__name__)
sys.exit(1)
return value
comp = string.maketrans('ATCGatcg','TAGCtagc')
def revcomp(seq):
return seq[::-1].translate(comp)
###############################################################################
###############################################################################
# based on hdf5.py from 10X Genomics
LEVEL_GROUP = "_levels"
def has_levels(ds):
""" Determine if a data column is leveled """
if "levels" in ds.attrs.keys():
return True
level_grp = ds.file.get(LEVEL_GROUP)
if level_grp:
ds_name = ds.name.split("/")[-1]
level_ds = level_grp.get(ds_name)
if level_ds:
return True
return False
def get_levels(ds):
""" Get the level index for a dataset """
if "levels" in ds.attrs.keys():
return ds.attrs["levels"][:]
level_grp = ds.file.get(LEVEL_GROUP)
if level_grp:
ds_name = ds.name.split("/")[-1]
level_ds = level_grp.get(ds_name)
if level_ds:
return level_ds[:]
return None
def get_column_intersection(column_names, columns):
if len(columns) > 0:
column_names = sorted(list(set(columns) & set(column_names)))
if len(column_names) == 0:
raise Exception("No valid column specifications.")
return column_names
def read_data_frame(fn, query_cols=[]):
""" Load a pandas DataFrame from an HDF5 file. If a column list is
specified, only load the matching columns """
with h5py.File(fn, "r") as f:
column_names = f.attrs.get("column_names")
column_names = get_column_intersection(column_names, query_cols)
df = pandas.DataFrame()
# Add the columns progressively to save memory
for name in column_names:
ds = f[name]
if has_levels(ds):
indices = ds[:]
uniques = get_levels(ds)
# This method of constructing of Categorical avoids copying the
# indices array which saves memory for big datasets
df[name] = pandas.Categorical(indices, categories=uniques,
ordered=False, fastpath=True)
else:
df[name] = pandas.Series(ds[:])
return df
###############################################################################
###############################################################################
def get_good_barcodes(fragments, proportion=0.90):
"""
return the top barcodes which together comprise 90% of reads
"""
read_counts = fragments.groupby("bc").sum()["num_reads"].copy()
read_counts.sort_values(inplace=True, ascending=False)
cutoff = proportion * read_counts.sum()
cutoff = numpy.where(read_counts.cumsum() >= cutoff)[0][0]
return sorted(read_counts.index[:cutoff])
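# Illustrative sketch of the cutoff logic above (toy numbers, not real data):
# with per-barcode read counts of [50, 30, 15, 5] sorted descending, the
# cumulative sums are [50, 80, 95, 100] and proportion=0.90 gives a read
# cutoff of 90; numpy.where(cumsum >= 90)[0][0] is 2, so read_counts.index[:2]
# -- the two highest-count barcodes -- is returned, sorted by barcode name.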
def get_good_bc_count(step):
sample_info = step.options.sample_info(step.sample.name)
dataset_info = sample_info[step.dataset.id]
good_bc_count = dataset_info["good_bc_count"]
return good_bc_count
def cpu_count_physical():
"""
tries to get the number of physical (ie not virtual) cores
"""
try:
import psutil
return psutil.cpu_count(logical=False)
except:
import multiprocessing
return multiprocessing.cpu_count()
def frags_overlap_same_chrom(frags, start, end):
"""
get the fragments overlapping the interval [start, end], assuming
all fragments in the input table are already on the correct chromosome
"""
f = frags.loc[((frags["start_pos"] < start) & (frags["end_pos"] > start)) |
((frags["start_pos"] < end) & (frags["end_pos"] > end))]
return f
###############################################################################
###############################################################################
def plot_matrix_as_image(mat, x1=None, y1=None, x2=None, y2=None, maxval=None, main="", xlab="", ylab=""):
""" adds the image to the current plot if x1, x2, y1 and y2 are defined;
otherwise, create a new image with the dimensions of the matrix """
from rpy2.robjects import r
r.plot(numpy.array([0]),
xlim=numpy.array([x1,x2]),
ylim=numpy.array([y1,y2]),
type="n", bty="n",
main=main, xlab=xlab, ylab=ylab)
if maxval is None:
maxval = mat.max()
r.rasterImage(r["as.raster"](mat, max=maxval), x1, y1, x2, y2)
| mit |
Nyker510/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
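# Minimal sketch of what _iterate_sparse_X yields (assumes a scipy CSR input;
# kept as a comment so nothing runs at import time):
#
#   >>> from scipy import sparse
#   >>> X = sparse.csr_matrix([[1, 0, 2], [0, 0, 3]])
#   >>> list(_iterate_sparse_X(X))
#   [array([ 1.,  0.,  2.]), array([ 0.,  0.,  3.])]
#
# Each yielded row is a dense 1-D ndarray rebuilt from the CSR indptr /
# indices / data arrays, so no per-row sparse matrix is allocated.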
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
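# Note on the split heuristic above: pairwise squared distances between all
# subcluster centroids are computed, the two centroids that are farthest
# apart seed the two new nodes, and every remaining subcluster is assigned
# to whichever seed is closer. Only the CF summaries (counts, linear sums,
# squared sums) are touched, so no pass over the original data is needed.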
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of ``init_centroids_``.
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way,
        # if we change init_centroids_ and init_sq_norm_, it is
        # sufficient.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # things are not too good. we need to redistribute the
            # subclusters in our child node, and add a new subcluster
            # in the parent subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
    child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the
        child of this subcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
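
# The closed form used in ``radius`` above reduces to
#     R**2 = squared_sum_ / n_samples_ - ||centroid_||**2
# since dot(linear_sum_, centroid_) == n_samples_ * ||centroid_||**2.
# A minimal sketch (illustrative only, not part of the library) checking
# that identity against a direct computation on random data:
def _radius_identity_sketch():
    import numpy as np

    rng = np.random.RandomState(0)
    samples = rng.rand(20, 5)
    n = len(samples)
    centroid = samples.sum(axis=0) / n
    squared_sum = (samples ** 2).sum()
    closed_form = np.sqrt(squared_sum / n - np.dot(centroid, centroid))
    direct = np.sqrt(np.mean(((samples - centroid) ** 2).sum(axis=1)))
    assert np.allclose(closed_form, direct)
    return closed_form
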
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince us to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
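        # Same trick as in the tree insertion step: each entry is
        # -2 * <x, centroid> + ||centroid||**2, i.e. the squared distance up
        # to the row-constant ||x||**2, so the row-wise argmin is unchanged.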
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
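
# A minimal usage sketch (illustrative only, not part of the library):
# repeated ``partial_fit`` calls keep growing the same CF Tree, and a final
# ``partial_fit()`` with no data re-runs only the global clustering step.
# The data and parameters below are arbitrary.
def _birch_partial_fit_sketch():
    import numpy as np
    from sklearn.cluster import Birch

    rng = np.random.RandomState(0)
    brc = Birch(n_clusters=3, threshold=0.2)
    for _ in range(4):
        brc.partial_fit(rng.rand(25, 2))   # four small batches
    brc.partial_fit()                      # global clustering only
    return brc.predict(rng.rand(5, 2))     # labels of unseen points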
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/decomposition/base.py | 23 | 5656 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
    X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
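
# A small sketch (illustrative only, not part of the library) checking that
# the matrix-inversion-lemma path in ``get_precision`` agrees with directly
# inverting ``get_covariance`` on a fitted PCA model with fewer components
# than features.
def _precision_vs_covariance_sketch():
    import numpy as np
    from scipy import linalg
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)
    pca = PCA(n_components=2).fit(X)
    assert np.allclose(pca.get_precision(),
                       linalg.inv(pca.get_covariance()))
    return pca.get_precision()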
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/metrics/classification.py | 2 | 73028 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
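
# A minimal sketch (illustrative only) of the three branches of
# ``_weighted_sum`` above: weighted average, weighted sum and plain sum of
# per-sample scores.
def _weighted_sum_sketch():
    import numpy as np

    score = np.array([1.0, 0.0, 1.0, 1.0])
    weight = np.array([1.0, 1.0, 2.0, 2.0])
    assert np.isclose(_weighted_sum(score, None, normalize=True), 0.75)
    assert np.isclose(_weighted_sum(score, weight, normalize=True), 5. / 6.)
    assert np.isclose(_weighted_sum(score, weight, normalize=False), 5.0)
    assert np.isclose(_weighted_sum(score, None, normalize=False), 3.0)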
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
        If ``normalize == True``, return the fraction of correctly classified
        samples (float), else return the number of correctly classified
        samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
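
# The ``coo_matrix`` construction above relies on duplicate (i, j) entries
# being summed when the matrix is materialised, which is what accumulates
# the per-sample weights into counts. A minimal sketch of that behaviour
# (illustrative only, not part of the library):
def _coo_accumulation_sketch():
    import numpy as np
    from scipy.sparse import coo_matrix

    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0, 1, 1, 1])
    weights = np.ones(4)
    cm = coo_matrix((weights, (y_true, y_pred)), shape=(2, 2)).toarray()
    assert np.array_equal(cm, np.array([[1., 1.], [0., 2.]]))
    return cm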
def cohen_kappa_score(y1, y2, labels=None, weights=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
        Weighting type to calculate the score. None means no weighting;
        "linear" means linear weighting; "quadratic" means quadratic
        weighting.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
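
# The weighting matrices built above, for n_classes = 3 (an illustrative
# sketch only): ``None`` penalises every disagreement equally, "linear"
# by |i - j| and "quadratic" by (i - j) ** 2.
def _kappa_weight_matrices_sketch(n_classes=3):
    import numpy as np

    base = np.zeros((n_classes, n_classes), dtype=int) + np.arange(n_classes)
    unweighted = np.ones((n_classes, n_classes), dtype=int)
    unweighted.flat[::n_classes + 1] = 0       # zero out the diagonal
    linear = np.abs(base - base.T)             # |i - j|
    quadratic = (base - base.T) ** 2           # (i - j) ** 2
    return unweighted, linear, quadratic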
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
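
# The covariance form used above is algebraically the same as the classic
# confusion-matrix formula
#     MCC = (tp * tn - fp * fn)
#           / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)).
# A minimal check of that equivalence (illustrative only, not part of the
# library):
def _mcc_equivalence_sketch():
    import numpy as np
    from sklearn.metrics import confusion_matrix, matthews_corrcoef

    y_true = [1, 1, 1, 0, 0, 1, 0, 0]
    y_pred = [1, 0, 1, 0, 1, 1, 0, 0]
    (tn, fp), (fn, tp) = confusion_matrix(y_true, y_pred)
    classic = (tp * tn - fp * fn) / np.sqrt(
        float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    assert np.isclose(classic, matthews_corrcoef(y_true, y_pred))
    return classic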
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
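
# A small sketch (illustrative only, not part of the library) of how
# ``beta`` trades precision against recall: a tiny beta approaches the
# precision and a large beta approaches the recall.
def _fbeta_limits_sketch():
    import numpy as np
    from sklearn.metrics import fbeta_score, precision_score, recall_score

    y_true = [1, 1, 1, 1, 0, 0]
    y_pred = [1, 1, 0, 0, 1, 0]
    p = precision_score(y_true, y_pred)   # 2 / 3
    r = recall_score(y_true, y_pred)      # 1 / 2
    assert np.isclose(fbeta_score(y_true, y_pred, beta=1e-3), p, atol=1e-3)
    assert np.isclose(fbeta_score(y_true, y_pred, beta=1e3), r, atol=1e-3)
    return p, r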
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
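
# A minimal sketch (illustrative only, not part of the library) of the
# zero-division handling above: positions with a zero denominator come back
# as 0.0 and a single ``UndefinedMetricWarning`` is issued for them.
def _prf_divide_sketch():
    import warnings

    import numpy as np

    num = np.array([2.0, 0.0, 3.0])
    den = np.array([4.0, 0.0, 3.0])
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        with np.errstate(divide='ignore', invalid='ignore'):
            result = _prf_divide(num, den, 'precision', 'predicted',
                                 average=None, warn_for=('precision',))
    assert np.allclose(result, [0.5, 0.0, 1.0])
    assert any(issubclass(w.category, UndefinedMetricWarning) for w in caught)
    return result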
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
        <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where
        # pred and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
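
# For multiclass input every sample contributes exactly one predicted and
# one true label, so the micro-averaged precision, recall and F-score
# computed above all collapse to the accuracy. A minimal check of this
# (illustrative only, not part of the library):
def _micro_average_sketch():
    import numpy as np

    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='micro')
    acc = accuracy_score(y_true, y_pred)
    assert np.allclose([p, r, f], acc)
    return acc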
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
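# The same check for recall (illustrative sketch, reusing the docstring lists):
# recall for class 0 is tp / (tp + fn); both samples with y_true == 0 are
# predicted as 0, so the class-0 recall is 1.0, matching average=None above.
#
#   >>> tp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 0)   # 2
#   >>> fn = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p != 0)   # 0
#   >>> tp / (tp + fn)
#   1.0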
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
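# The 'avg / total' row in the report is a support-weighted mean of the
# per-class scores; for the docstring example the averaged precision is
# (illustrative sketch):
#
#   >>> np.average([0.50, 0.00, 1.00], weights=[1, 1, 3])
#   0.7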
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
classes : array, shape = [n_labels], optional
(deprecated) Integer array of labels. This parameter has been
renamed to ``labels`` in version 0.18 and will be removed in 0.20.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample as incorrect if it does not entirely match the true set
    of labels. Hamming loss is more forgiving in that it penalizes only the
    individual labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
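# Worked sketch of the docstring example: with the classes ordered
# alphabetically (ham=0, spam=1), each per-sample loss is -log of the
# probability assigned to the true class, and the mean reproduces 0.21616...:
#
#   >>> -(np.log(.9) + np.log(.9) + np.log(.8) + np.log(.65)) / 4
#   0.21616...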
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
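# Worked sketch of the binary docstring example: the per-sample losses are
# max(0, 1 - y * decision), i.e. approximately [0, 0, 0.91] for y = [-1, 1, 1]
# and decisions [-2.18..., 2.36..., 0.09...], whose mean gives the reported 0.30...:
#
#   >>> np.mean(np.maximum(0, 1 - np.array([-1, 1, 1]) * pred_decision))
#   0.30...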
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
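# Worked sketch of the docstring example: the Brier score is the mean squared
# gap between the predicted probability of the positive class and the outcome:
#
#   >>> np.mean((np.array([0, 1, 1, 0]) - np.array([0.1, 0.9, 0.8, 0.3])) ** 2)
#   0.0375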
| bsd-3-clause |
jgowans/correlation_plotter | impulse_field_test_results_generator_from_raw.py | 1 | 5519 | #!/usr/bin/env python
import argparse
import logging
from colorlog import ColoredFormatter
import time
import os
import csv
import datetime
import numpy as np
from directionFinder_backend.antenna_array import AntennaArray
from directionFinder_backend.direction_finder import DirectionFinder
from directionFinder_backend.correlator import Correlator
from directionFinder_backend.correlation import Correlation
import itertools
import matplotlib.pyplot as plt
class FakeCorrelation():
def add_cable_length_calibration(self, length_a, velocity_factor_a, length_b, velocity_factor_b):
pass
class FakeCorrelator(Correlator):
def __init__(self, ip_addr='192.168.14.30', num_channels=4, fs=800e6, logger=logging.getLogger(__name__), signals = None):
self.logger = logger
self.num_channels = num_channels
self.fs = np.float64(fs)
self.cross_combinations = list(itertools.combinations(range(num_channels), 2))
self.auto_combinations = [(0, 0)]
self.time_domain_calibration_values = None
self.time_domain_calibration_cable_values = None
self.frequency_correlations = {}
for a, b in self.cross_combinations:
self.frequency_correlations[(a, b)] = FakeCorrelation()
self.time_domain_signals = None
self.upsample_factor = 100
self.subsignal_length_max = 2**17
self.time_domain_padding = 100
def notch_filter(signal, fs, start, stop):
""" Sets bins from and including start throgh stop to 0
"""
fft = np.fft.rfft(signal)
axis = np.linspace(0, fs/2.0, len(fft))
for idx, freq in enumerate(axis):
if freq >= start and freq <= stop:
fft[idx] = 0
signal = np.fft.irfft(fft)
return signal
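# Usage sketch (illustrative): the main block below applies this twice to keep
# only the f_start..f_stop band, e.g.
#   signal = notch_filter(signal, fs, 0, 200e6)       # drop everything below 200 MHz
#   signal = notch_filter(signal, fs, 300e6, 400e6)   # drop everything above 300 MHz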
def time_domain_filter(signal, filter_len, filter_level):
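    """ Zero out samples that are not part of a strong burst.
    For each sample, the absolute values in a window of `filter_len` samples
    around it are summed; if that sum is below `filter_level * filter_len`,
    the sample is set to 0 in the returned copy.
    """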
signal_filtered = np.copy(signal)
for centre_idx in range(len(signal)):
accumulation = 0
for offset in range(-int(round(filter_len/2.0)), int(round(filter_len/2))):
idx = centre_idx + offset
if idx >= 0 and idx < len(signal):
accumulation += np.abs(signal[idx])
else:
                accumulation += 0  # explicit: don't add anything beyond the signal edges
if accumulation < filter_level * filter_len:
signal_filtered[centre_idx] = 0
return signal_filtered
if __name__ == '__main__':
# setup root logger. Shouldn't be used much but will catch unexpected messages
colored_formatter = ColoredFormatter("%(log_color)s%(asctime)s:%(levelname)s:%(name)s:%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(colored_formatter)
handler.setLevel(logging.DEBUG)
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)
logger = logging.getLogger('main')
logger.propagate = False
logger.addHandler(handler)
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--d', type=str)
parser.add_argument('--f_start', default=200e6, type=float)
parser.add_argument('--f_stop', default=300e6, type=float)
parser.add_argument('--array_geometry_file', default=None)
args = parser.parse_args()
directory = args.d
correlator = FakeCorrelator()
correlator.add_cable_length_calibrations('/home/jgowans/workspace/directionFinder_backend/config/cable_length_calibration_actual_array.json')
correlator.add_time_domain_calibration('/home/jgowans/workspace/directionFinder_backend/config/time_domain_calibration_through_rf_chain.json')
fs = correlator.fs
array = AntennaArray.mk_from_config(args.array_geometry_file)
df = DirectionFinder(correlator, array, args.f_start, logger.getChild('df'))
df.set_time()
contents = os.listdir(directory)
contents.sort()
for timestamp_str in contents:
try:
timestamp = float(timestamp_str)
except ValueError:
continue
#fig = plt.figure()
correlator.time_domain_signals = None
num_channels = 4
for channel in range(num_channels):
filename = "{c}.npy".format(c = channel)
with open("{d}/{t}/{f}".format(d = directory, t = timestamp, f = filename)) as f:
signal = np.load(f)
#subplot_sig = fig.add_subplot(4, 2, (2*channel) + 1)
#subplot_fft = fig.add_subplot(4, 2, (2*channel) + 2)
#subplot_sig.plot(signal)
#fft = np.abs(np.fft.rfft(signal))
#subplot_fft.plot(np.linspace(0, 400, len(fft)), fft)
signal = notch_filter(signal, fs, 0, args.f_start)
signal = notch_filter(signal, fs, args.f_stop, 400e6)
signal = time_domain_filter(signal, 10, 10)
                if correlator.time_domain_signals is None:
correlator.time_domain_signals = np.ndarray((num_channels, len(signal)))
correlator.time_domain_signals[channel] = signal
#subplot_sig.plot(signal)
#fft = np.abs(np.fft.rfft(signal))
#subplot_fft.plot(np.linspace(0, 400, len(fft)), fft)
correlator.time_domain_axis = np.linspace(0,
len(correlator.time_domain_signals[0])/fs,
len(correlator.time_domain_signals[0]),
endpoint = False)
#plt.show()
df.df_impulse(args.d, t = timestamp)
exit()
| mit |
ilyes14/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
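# Equivalent metrics via sklearn.metrics (illustrative alternative):
#   from sklearn.metrics import mean_squared_error, r2_score
#   mean_squared_error(diabetes_y_test, regr.predict(diabetes_X_test))
#   r2_score(diabetes_y_test, regr.predict(diabetes_X_test))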
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
CVML/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
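# The isotonic fit is non-decreasing by construction; a quick sanity check
# (illustrative) is:
#   assert np.all(np.diff(y_) >= 0)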
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
kedaio/tushare | tushare/stock/trading.py | 1 | 29297 | # -*- coding:utf-8 -*-
"""
Trading data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.stock.reference import new_stocks
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Get historical trading records for an individual stock
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format YYYY-MM-DD; if empty, data starts from the earliest date the API provides
      end:string
                  end date, format YYYY-MM-DD; if empty, data runs up to the most recent trading day
      ktype:string
                  bar type: D=daily, W=weekly, M=monthly, 5/15/30/60=minute bars; default is D
      retry_count : int, default 3
                  number of times to retry when network problems occur
      pause : int, default 0
                  seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
    return
    -------
      DataFrame
          columns: date, open, high, close, low, volume, price change, pct change, 5/10/20-day mean price, 5/10/20-day mean volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
df[df==''] = 0
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
df = df.sort_index(ascending = False)
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
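# Usage sketch (illustrative; dates are placeholder values), following the
# docstring above:
#   df = get_hist_data('600848', start='2015-01-05', end='2015-03-16', ktype='W')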
def _parsing_dayprice_json(pageNum=1):
"""
处理当日行情分页数据,格式为json
Parameters
------
pageNum:页码
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
# df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
获取分笔数据
Parameters
------
code:string
股票代码 e.g. 600848
date:string
日期 format:YYYY-MM-DD
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 20:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_sina_dd(code=None, date=None, vol=400, retry_count=3, pause=0.001):
"""
获取sina大单数据
Parameters
------
code:string
股票代码 e.g. 600848
date:string
日期 format:YYYY-MM-DD
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
属性:股票代码 股票名称 交易时间 价格 成交量 前一笔价格 类型(买、卖、中性盘)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
vol = vol*100
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.SINA_DD % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['sinadd'],
symbol, vol, date))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_csv(StringIO(lines), names=ct.SINA_DD_COLS,
skiprows=[0])
if df is not None:
df['code'] = df['code'].map(lambda x: x[2:])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
获取当日分笔明细数据
Parameters
------
code:string
股票代码 e.g. 600848
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame 当日所有股票交易数据(DataFrame)
属性:成交时间、成交价格、价格变动,成交手、成交金额(元),买卖类型
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages+1):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
    Fetch trading data for every stock for the most recent trading day in one call
    return
    -------
      DataFrame
          columns: code, name, pct change, current price, open, high, low, previous close, volume, turnover rate, amount, P/E, P/B, total market cap, float market cap
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
获取实时交易数据 getting real time quotes data
用于跟踪交易情况(本次执行的结果-上一次执行的数据)
Parameters
------
symbols : string, array-like object (list, tuple, Series).
return
-------
DataFrame 实时交易数据
属性:0:name,股票名字
1:open,今日开盘价
2:pre_close,昨日收盘价
3:price,当前价格
4:high,今日最高价
5:low,今日最低价
6:bid,竞买价,即“买一”报价
7:ask,竞卖价,即“卖一”报价
8:volumn,成交量 maybe you need do volumn/100
9:amount,成交金额(元 CNY)
10:b1_v,委买一(笔数 bid volume)
11:b1_p,委买一(价格 bid price)
12:b2_v,“买二”
13:b2_p,“买二”
14:b3_v,“买三”
15:b3_p,“买三”
16:b4_v,“买四”
17:b4_p,“买四”
18:b5_v,“买五”
19:b5_p,“买五”
20:a1_v,委卖一(笔数 ask volume)
21:a1_p,委卖一(价格 ask price)
...
30:date,日期;
31:time,时间;
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
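# Usage sketch (illustrative): a single code or a list of codes is accepted,
#   get_realtime_quotes('600848')
#   get_realtime_quotes(['600848', '000002'])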
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001, drop_factor=True):
'''
获取历史复权数据
Parameters
------
code:string
股票代码 e.g. 600848
start:string
开始日期 format:YYYY-MM-DD 为空时取当前日期
end:string
结束日期 format:YYYY-MM-DD 为空时取去年今日
autype:string
复权类型,qfq-前复权 hfq-后复权 None-不复权,默认为qfq
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
drop_factor : bool, 默认 True
是否移除复权因子,在分析过程中可能复权因子意义不大,但是如需要先储存到数据库之后再分析的话,有该项目会更加灵活
return
-------
DataFrame
date 交易日期 (index)
open 开盘价
high 最高价
close 收盘价
low 最低价
volume 成交量
amount 成交金额
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if data is None:
data = pd.DataFrame()
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
            if df is None:  # df may be empty, so stop looping
break
else:
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
if drop_factor:
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
firstDate = data.head(1)['date']
frow = df[df.date == firstDate[0]]
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending = False)
data = data.astype(float)
return data
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
if sarr == '':
return None
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except ValueError as e:
            # the date is too early; no more data can be read
return None
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
    Fetch quotes for the major market indices
    return
    -------
    DataFrame
        code: index code
        name: index name
        change: pct change
        open: open price
        preclose: previous close
        close: close price
        high: high price
        low: low price
        volume: volume (lots)
        amount: turnover (hundred million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT4)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_k_data(code=None, start='', end='',
ktype='D', autype='qfq',
index=False,
retry_count=3,
pause=0.001):
"""
获取k线数据
---------
Parameters:
code:string
股票代码 e.g. 600848
start:string
开始日期 format:YYYY-MM-DD 为空时取上市首日
end:string
结束日期 format:YYYY-MM-DD 为空时取最近一个交易日
autype:string
复权类型,qfq-前复权 hfq-后复权 None-不复权,默认为qfq
ktype:string
数据类型,D=日k线 W=周 M=月 5=5分钟 15=15分钟 30=30分钟 60=60分钟,默认为D
retry_count : int, 默认 3
如遇网络等问题重复执行的次数
pause : int, 默认 0
重复请求数据过程中暂停的秒数,防止请求间隔时间太短出现的问题
return
-------
DataFrame
date 交易日期 (index)
open 开盘价
high 最高价
close 收盘价
low 最低价
volume 成交量
code 股票代码
"""
symbol = ct.INDEX_SYMBOL[code] if index else _code_to_symbol(code)
url = ''
dataflag = ''
autype = '' if autype is None else autype
if (start is not None) & (start != ''):
end = du.today() if end is None or end == '' else end
if ktype.upper() in ct.K_LABELS:
fq = autype if autype is not None else ''
if code[:1] in ('1', '5') or index:
fq = ''
kline = '' if autype is None else 'fq'
if (start is None or start == '') & (end is None or end == ''):
urls = [ct.KLINE_TT_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],
kline, fq, symbol,
ct.TT_K_TYPE[ktype.upper()], start, end,
fq, _random(17))]
else:
years = du.tt_dates(start, end)
urls = []
for year in years:
startdate = str(year) + '-01-01'
enddate = str(year+1) + '-12-31'
url = ct.KLINE_TT_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],
kline, fq+str(year), symbol,
ct.TT_K_TYPE[ktype.upper()], startdate, enddate,
fq, _random(17))
urls.append(url)
dataflag = '%s%s'%(fq, ct.TT_K_TYPE[ktype.upper()])
elif ktype in ct.K_MIN_LABELS:
urls = [ct.KLINE_TT_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['tt'],
symbol, ktype, ktype,
_random(16))]
dataflag = 'm%s'%ktype
else:
raise TypeError('ktype input error.')
data = pd.DataFrame()
for url in urls:
data = data.append(_get_k_data(url, dataflag,
symbol, code,
index, ktype,
retry_count, pause),
ignore_index=True)
if ktype not in ct.K_MIN_LABELS:
if ((start is not None) & (start != '')) & ((end is not None) & (end != '')):
data = data[(data.date >= start) & (data.date <= end)]
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
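# Usage sketch (illustrative; dates are placeholder values), following the
# docstring above:
#   df = get_k_data('600848', start='2015-01-05', end='2015-06-30', ktype='D', autype='qfq')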
def _get_k_data(url, dataflag='',
symbol='',
code = '',
index = False,
ktype = '',
retry_count=3,
pause=0.001):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 100: #no data
return None
except Exception as e:
print(e)
else:
lines = lines.decode('utf-8') if ct.PY3 else lines
lines = lines.split('=')[1]
reg = re.compile(r',{"nd.*?}')
lines = re.subn(reg, '', lines)
js = json.loads(lines[0])
dataflag = dataflag if dataflag in list(js['data'][symbol].keys()) else ct.TT_K_TYPE[ktype.upper()]
df = pd.DataFrame(js['data'][symbol][dataflag], columns=ct.KLINE_TT_COLS)
df['code'] = symbol if index else code
if ktype in ct.K_MIN_LABELS:
df['date'] = df['date'].map(lambda x: '%s-%s-%s %s:%s'%(x[0:4], x[4:6],
x[6:8], x[8:10],
x[10:12]))
for col in df.columns[1:6]:
df[col] = df[col].astype(float)
return df
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Fetch historical quotes for a batch of symbols; see get_hist_data for the parameters and the returned data type
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
    Generate the exchange-prefixed symbol (sh/sz) for a stock code
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6', '9'] else 'sz%s'%code
| bsd-3-clause |
Midafi/scikit-image | skimage/util/colormap.py | 25 | 12423 | from matplotlib.colors import LinearSegmentedColormap
viridis_data = [[ 0.26700401, 0.00487433, 0.32941519],
[ 0.26851048, 0.00960483, 0.33542652],
[ 0.26994384, 0.01462494, 0.34137895],
[ 0.27130489, 0.01994186, 0.34726862],
[ 0.27259384, 0.02556309, 0.35309303],
[ 0.27380934, 0.03149748, 0.35885256],
[ 0.27495242, 0.03775181, 0.36454323],
[ 0.27602238, 0.04416723, 0.37016418],
[ 0.2770184 , 0.05034437, 0.37571452],
[ 0.27794143, 0.05632444, 0.38119074],
[ 0.27879067, 0.06214536, 0.38659204],
[ 0.2795655 , 0.06783587, 0.39191723],
[ 0.28026658, 0.07341724, 0.39716349],
[ 0.28089358, 0.07890703, 0.40232944],
[ 0.28144581, 0.0843197 , 0.40741404],
[ 0.28192358, 0.08966622, 0.41241521],
[ 0.28232739, 0.09495545, 0.41733086],
[ 0.28265633, 0.10019576, 0.42216032],
[ 0.28291049, 0.10539345, 0.42690202],
[ 0.28309095, 0.11055307, 0.43155375],
[ 0.28319704, 0.11567966, 0.43611482],
[ 0.28322882, 0.12077701, 0.44058404],
[ 0.28318684, 0.12584799, 0.44496 ],
[ 0.283072 , 0.13089477, 0.44924127],
[ 0.28288389, 0.13592005, 0.45342734],
[ 0.28262297, 0.14092556, 0.45751726],
[ 0.28229037, 0.14591233, 0.46150995],
[ 0.28188676, 0.15088147, 0.46540474],
[ 0.28141228, 0.15583425, 0.46920128],
[ 0.28086773, 0.16077132, 0.47289909],
[ 0.28025468, 0.16569272, 0.47649762],
[ 0.27957399, 0.17059884, 0.47999675],
[ 0.27882618, 0.1754902 , 0.48339654],
[ 0.27801236, 0.18036684, 0.48669702],
[ 0.27713437, 0.18522836, 0.48989831],
[ 0.27619376, 0.19007447, 0.49300074],
[ 0.27519116, 0.1949054 , 0.49600488],
[ 0.27412802, 0.19972086, 0.49891131],
[ 0.27300596, 0.20452049, 0.50172076],
[ 0.27182812, 0.20930306, 0.50443413],
[ 0.27059473, 0.21406899, 0.50705243],
[ 0.26930756, 0.21881782, 0.50957678],
[ 0.26796846, 0.22354911, 0.5120084 ],
[ 0.26657984, 0.2282621 , 0.5143487 ],
[ 0.2651445 , 0.23295593, 0.5165993 ],
[ 0.2636632 , 0.23763078, 0.51876163],
[ 0.26213801, 0.24228619, 0.52083736],
[ 0.26057103, 0.2469217 , 0.52282822],
[ 0.25896451, 0.25153685, 0.52473609],
[ 0.25732244, 0.2561304 , 0.52656332],
[ 0.25564519, 0.26070284, 0.52831152],
[ 0.25393498, 0.26525384, 0.52998273],
[ 0.25219404, 0.26978306, 0.53157905],
[ 0.25042462, 0.27429024, 0.53310261],
[ 0.24862899, 0.27877509, 0.53455561],
[ 0.2468114 , 0.28323662, 0.53594093],
[ 0.24497208, 0.28767547, 0.53726018],
[ 0.24311324, 0.29209154, 0.53851561],
[ 0.24123708, 0.29648471, 0.53970946],
[ 0.23934575, 0.30085494, 0.54084398],
[ 0.23744138, 0.30520222, 0.5419214 ],
[ 0.23552606, 0.30952657, 0.54294396],
[ 0.23360277, 0.31382773, 0.54391424],
[ 0.2316735 , 0.3181058 , 0.54483444],
[ 0.22973926, 0.32236127, 0.54570633],
[ 0.22780192, 0.32659432, 0.546532 ],
[ 0.2258633 , 0.33080515, 0.54731353],
[ 0.22392515, 0.334994 , 0.54805291],
[ 0.22198915, 0.33916114, 0.54875211],
[ 0.22005691, 0.34330688, 0.54941304],
[ 0.21812995, 0.34743154, 0.55003755],
[ 0.21620971, 0.35153548, 0.55062743],
[ 0.21429757, 0.35561907, 0.5511844 ],
[ 0.21239477, 0.35968273, 0.55171011],
[ 0.2105031 , 0.36372671, 0.55220646],
[ 0.20862342, 0.36775151, 0.55267486],
[ 0.20675628, 0.37175775, 0.55311653],
[ 0.20490257, 0.37574589, 0.55353282],
[ 0.20306309, 0.37971644, 0.55392505],
[ 0.20123854, 0.38366989, 0.55429441],
[ 0.1994295 , 0.38760678, 0.55464205],
[ 0.1976365 , 0.39152762, 0.55496905],
[ 0.19585993, 0.39543297, 0.55527637],
[ 0.19410009, 0.39932336, 0.55556494],
[ 0.19235719, 0.40319934, 0.55583559],
[ 0.19063135, 0.40706148, 0.55608907],
[ 0.18892259, 0.41091033, 0.55632606],
[ 0.18723083, 0.41474645, 0.55654717],
[ 0.18555593, 0.4185704 , 0.55675292],
[ 0.18389763, 0.42238275, 0.55694377],
[ 0.18225561, 0.42618405, 0.5571201 ],
[ 0.18062949, 0.42997486, 0.55728221],
[ 0.17901879, 0.43375572, 0.55743035],
[ 0.17742298, 0.4375272 , 0.55756466],
[ 0.17584148, 0.44128981, 0.55768526],
[ 0.17427363, 0.4450441 , 0.55779216],
[ 0.17271876, 0.4487906 , 0.55788532],
[ 0.17117615, 0.4525298 , 0.55796464],
[ 0.16964573, 0.45626209, 0.55803034],
[ 0.16812641, 0.45998802, 0.55808199],
[ 0.1666171 , 0.46370813, 0.55811913],
[ 0.16511703, 0.4674229 , 0.55814141],
[ 0.16362543, 0.47113278, 0.55814842],
[ 0.16214155, 0.47483821, 0.55813967],
[ 0.16066467, 0.47853961, 0.55811466],
[ 0.15919413, 0.4822374 , 0.5580728 ],
[ 0.15772933, 0.48593197, 0.55801347],
[ 0.15626973, 0.4896237 , 0.557936 ],
[ 0.15481488, 0.49331293, 0.55783967],
[ 0.15336445, 0.49700003, 0.55772371],
[ 0.1519182 , 0.50068529, 0.55758733],
[ 0.15047605, 0.50436904, 0.55742968],
[ 0.14903918, 0.50805136, 0.5572505 ],
[ 0.14760731, 0.51173263, 0.55704861],
[ 0.14618026, 0.51541316, 0.55682271],
[ 0.14475863, 0.51909319, 0.55657181],
[ 0.14334327, 0.52277292, 0.55629491],
[ 0.14193527, 0.52645254, 0.55599097],
[ 0.14053599, 0.53013219, 0.55565893],
[ 0.13914708, 0.53381201, 0.55529773],
[ 0.13777048, 0.53749213, 0.55490625],
[ 0.1364085 , 0.54117264, 0.55448339],
[ 0.13506561, 0.54485335, 0.55402906],
[ 0.13374299, 0.54853458, 0.55354108],
[ 0.13244401, 0.55221637, 0.55301828],
[ 0.13117249, 0.55589872, 0.55245948],
[ 0.1299327 , 0.55958162, 0.55186354],
[ 0.12872938, 0.56326503, 0.55122927],
[ 0.12756771, 0.56694891, 0.55055551],
[ 0.12645338, 0.57063316, 0.5498411 ],
[ 0.12539383, 0.57431754, 0.54908564],
[ 0.12439474, 0.57800205, 0.5482874 ],
[ 0.12346281, 0.58168661, 0.54744498],
[ 0.12260562, 0.58537105, 0.54655722],
[ 0.12183122, 0.58905521, 0.54562298],
[ 0.12114807, 0.59273889, 0.54464114],
[ 0.12056501, 0.59642187, 0.54361058],
[ 0.12009154, 0.60010387, 0.54253043],
[ 0.11973756, 0.60378459, 0.54139999],
[ 0.11951163, 0.60746388, 0.54021751],
[ 0.11942341, 0.61114146, 0.53898192],
[ 0.11948255, 0.61481702, 0.53769219],
[ 0.11969858, 0.61849025, 0.53634733],
[ 0.12008079, 0.62216081, 0.53494633],
[ 0.12063824, 0.62582833, 0.53348834],
[ 0.12137972, 0.62949242, 0.53197275],
[ 0.12231244, 0.63315277, 0.53039808],
[ 0.12344358, 0.63680899, 0.52876343],
[ 0.12477953, 0.64046069, 0.52706792],
[ 0.12632581, 0.64410744, 0.52531069],
[ 0.12808703, 0.64774881, 0.52349092],
[ 0.13006688, 0.65138436, 0.52160791],
[ 0.13226797, 0.65501363, 0.51966086],
[ 0.13469183, 0.65863619, 0.5176488 ],
[ 0.13733921, 0.66225157, 0.51557101],
[ 0.14020991, 0.66585927, 0.5134268 ],
[ 0.14330291, 0.66945881, 0.51121549],
[ 0.1466164 , 0.67304968, 0.50893644],
[ 0.15014782, 0.67663139, 0.5065889 ],
[ 0.15389405, 0.68020343, 0.50417217],
[ 0.15785146, 0.68376525, 0.50168574],
[ 0.16201598, 0.68731632, 0.49912906],
[ 0.1663832 , 0.69085611, 0.49650163],
[ 0.1709484 , 0.69438405, 0.49380294],
[ 0.17570671, 0.6978996 , 0.49103252],
[ 0.18065314, 0.70140222, 0.48818938],
[ 0.18578266, 0.70489133, 0.48527326],
[ 0.19109018, 0.70836635, 0.48228395],
[ 0.19657063, 0.71182668, 0.47922108],
[ 0.20221902, 0.71527175, 0.47608431],
[ 0.20803045, 0.71870095, 0.4728733 ],
[ 0.21400015, 0.72211371, 0.46958774],
[ 0.22012381, 0.72550945, 0.46622638],
[ 0.2263969 , 0.72888753, 0.46278934],
[ 0.23281498, 0.73224735, 0.45927675],
[ 0.2393739 , 0.73558828, 0.45568838],
[ 0.24606968, 0.73890972, 0.45202405],
[ 0.25289851, 0.74221104, 0.44828355],
[ 0.25985676, 0.74549162, 0.44446673],
[ 0.26694127, 0.74875084, 0.44057284],
[ 0.27414922, 0.75198807, 0.4366009 ],
[ 0.28147681, 0.75520266, 0.43255207],
[ 0.28892102, 0.75839399, 0.42842626],
[ 0.29647899, 0.76156142, 0.42422341],
[ 0.30414796, 0.76470433, 0.41994346],
[ 0.31192534, 0.76782207, 0.41558638],
[ 0.3198086 , 0.77091403, 0.41115215],
[ 0.3277958 , 0.77397953, 0.40664011],
[ 0.33588539, 0.7770179 , 0.40204917],
[ 0.34407411, 0.78002855, 0.39738103],
[ 0.35235985, 0.78301086, 0.39263579],
[ 0.36074053, 0.78596419, 0.38781353],
[ 0.3692142 , 0.78888793, 0.38291438],
[ 0.37777892, 0.79178146, 0.3779385 ],
[ 0.38643282, 0.79464415, 0.37288606],
[ 0.39517408, 0.79747541, 0.36775726],
[ 0.40400101, 0.80027461, 0.36255223],
[ 0.4129135 , 0.80304099, 0.35726893],
[ 0.42190813, 0.80577412, 0.35191009],
[ 0.43098317, 0.80847343, 0.34647607],
[ 0.44013691, 0.81113836, 0.3409673 ],
[ 0.44936763, 0.81376835, 0.33538426],
[ 0.45867362, 0.81636288, 0.32972749],
[ 0.46805314, 0.81892143, 0.32399761],
[ 0.47750446, 0.82144351, 0.31819529],
[ 0.4870258 , 0.82392862, 0.31232133],
[ 0.49661536, 0.82637633, 0.30637661],
[ 0.5062713 , 0.82878621, 0.30036211],
[ 0.51599182, 0.83115784, 0.29427888],
[ 0.52577622, 0.83349064, 0.2881265 ],
[ 0.5356211 , 0.83578452, 0.28190832],
[ 0.5455244 , 0.83803918, 0.27562602],
[ 0.55548397, 0.84025437, 0.26928147],
[ 0.5654976 , 0.8424299 , 0.26287683],
[ 0.57556297, 0.84456561, 0.25641457],
[ 0.58567772, 0.84666139, 0.24989748],
[ 0.59583934, 0.84871722, 0.24332878],
[ 0.60604528, 0.8507331 , 0.23671214],
[ 0.61629283, 0.85270912, 0.23005179],
[ 0.62657923, 0.85464543, 0.22335258],
[ 0.63690157, 0.85654226, 0.21662012],
[ 0.64725685, 0.85839991, 0.20986086],
[ 0.65764197, 0.86021878, 0.20308229],
[ 0.66805369, 0.86199932, 0.19629307],
[ 0.67848868, 0.86374211, 0.18950326],
[ 0.68894351, 0.86544779, 0.18272455],
[ 0.69941463, 0.86711711, 0.17597055],
[ 0.70989842, 0.86875092, 0.16925712],
[ 0.72039115, 0.87035015, 0.16260273],
[ 0.73088902, 0.87191584, 0.15602894],
[ 0.74138803, 0.87344918, 0.14956101],
[ 0.75188414, 0.87495143, 0.14322828],
[ 0.76237342, 0.87642392, 0.13706449],
[ 0.77285183, 0.87786808, 0.13110864],
[ 0.78331535, 0.87928545, 0.12540538],
[ 0.79375994, 0.88067763, 0.12000532],
[ 0.80418159, 0.88204632, 0.11496505],
[ 0.81457634, 0.88339329, 0.11034678],
[ 0.82494028, 0.88472036, 0.10621724],
[ 0.83526959, 0.88602943, 0.1026459 ],
[ 0.84556056, 0.88732243, 0.09970219],
[ 0.8558096 , 0.88860134, 0.09745186],
[ 0.86601325, 0.88986815, 0.09595277],
[ 0.87616824, 0.89112487, 0.09525046],
[ 0.88627146, 0.89237353, 0.09537439],
[ 0.89632002, 0.89361614, 0.09633538],
[ 0.90631121, 0.89485467, 0.09812496],
[ 0.91624212, 0.89609127, 0.1007168 ],
[ 0.92610579, 0.89732977, 0.10407067],
[ 0.93590444, 0.8985704 , 0.10813094],
[ 0.94563626, 0.899815 , 0.11283773],
[ 0.95529972, 0.90106534, 0.11812832],
[ 0.96489353, 0.90232311, 0.12394051],
[ 0.97441665, 0.90358991, 0.13021494],
[ 0.98386829, 0.90486726, 0.13689671],
[ 0.99324789, 0.90615657, 0.1439362 ]]
viridis = LinearSegmentedColormap.from_list('viridis', viridis_data)
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/xgboost_take13.py | 1 | 2290 | """
Take 1 on the RandomForest, predicting for country_destinations.
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1].values
y = training['country_destination'].values
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
# LabelEncoder
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(y_train);
y_train = le.transform(y_train);
y_valid = le.transform(y_valid);
# Train classifier
import xgboost as xgb
xg_train = xgb.DMatrix(x_train,label=y_train);
xg_valid = xgb.DMatrix(x_valid,label=y_valid);
# setup parameters for xgboost
param = {}
# use softmax multi-class classification
param['objective'] = 'multi:softmax' # can be 'multi:softmax' or 'multi:softprob'
# learning rate (eta): step-size shrinkage applied after each boosting round
param['eta'] = 0.9
param['max_depth'] = 1000
param['gamma'] = 0.1
param['silent'] = 0 # 1 means silent mode
param['nthread'] = 8
param['min_child_weight'] = 0.01 # 1 is the default; the larger, the more conservative
param['num_class'] = len(np.unique(y_train).tolist());
param['booster'] = 'gbtree' # default is 'gbtree'
param['subsample'] = 1.0 # default is 1.0
param['base_score'] = 0.5 # default is 0.5
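# Optional refinement (a sketch, assuming xgboost's standard `evals` watchlist
# argument; the original run trains without per-round reporting):
#   watchlist = [(xg_train, 'train'), (xg_valid, 'valid')]
#   clf = xgb.train(param, xg_train, num_round, evals=watchlist)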
# Train & Get validation data
num_round = 10000
clf = xgb.train(param, xg_train, num_round);
#clf = xgb.cv(param, xg_train, num_round);
# get predictions
y_preds = clf.predict( xg_valid );
# Run Predictions
from sklearn.metrics import confusion_matrix, accuracy_score
print( confusion_matrix(y_valid,y_preds) );
print( "Accuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f = open('xgboost_take13.txt', 'w')
f.write( str(confusion_matrix(y_valid,y_preds)) );
f.write( "\nAccuracy: %f" % (accuracy_score(y_valid,y_preds)) );
f.write( str(param) );
f.close()
# Now on to final submission
xg_test = xgb.DMatrix(testing.iloc[:,1:].values);
y_final = le.inverse_transform( clf.predict(xg_test).reshape([62096,]).astype(int) );
y_final = pd.DataFrame(y_final);
numbahs = testing['id']
df = pd.concat([numbahs,y_final],axis=1)
df.columns = ['id','country']
df.to_csv("xgboost_take13.csv",index=False)
# Save model
clf.save_model('xgb_take13.model');
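# Hedged addendum (editor's sketch, not part of the original submission): to
# pick num_round empirically rather than hard-coding 10000 rounds, xgb.cv
# could be run on the same DMatrix. The nfold and round counts below are
# illustrative assumptions, kept commented out like the xgb.cv line above.
#cv_history = xgb.cv(param, xg_train, num_boost_round=200, nfold=3, seed=0)
#print(cv_history)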
| gpl-2.0 |
mayblue9/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
ishanic/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, DD, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
csgxy123/Dato-Core | src/unity/python/graphlab/deps/__init__.py | 13 | 1294 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from distutils.version import StrictVersion
import logging
def __get_version(version):
if 'dev' in str(version):
version = version[:version.find('.dev')]
return StrictVersion(version)
HAS_PANDAS = True
PANDAS_MIN_VERSION = '0.13.0'
try:
import pandas
if __get_version(pandas.__version__) < StrictVersion(PANDAS_MIN_VERSION):
HAS_PANDAS = False
logging.warn(('Pandas version %s is not supported. Minimum required version: %s. '
'Pandas support will be disabled.')
% (pandas.__version__, PANDAS_MIN_VERSION) )
except:
HAS_PANDAS = False
import pandas_mock as pandas
HAS_NUMPY = True
NUMPY_MIN_VERSION = '1.8.0'
try:
import numpy
if __get_version(numpy.__version__) < StrictVersion(NUMPY_MIN_VERSION):
HAS_NUMPY = False
logging.warn(('Numpy version %s is not supported. Minimum required version: %s. '
'Numpy support will be disabled.')
% (numpy.__version__, NUMPY_MIN_VERSION) )
except:
HAS_NUMPY = False
import numpy_mock as numpy
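# Hedged usage sketch (editor's note, not part of the original module):
# downstream code is expected to gate optional functionality on these flags
# instead of importing pandas/numpy unconditionally, e.g.
#
#   if HAS_PANDAS:
#       frame = pandas.DataFrame({'value': [1, 2, 3]})
#   else:
#       frame = None  # fall back to plain Python containers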
| agpl-3.0 |
benjaminpope/whisky | pymask/__init__.py | 2 | 1629 | ''' --------------------------------------------------------------------
PYMASK: Python aperture masking analysis pipeline
--------------------------------------------------------------------
---
pymask is a python module for fitting models to aperture masking
data reduced to oifits format by the IDL masking pipeline.
It consists of a class, cpo, which stores all the relevant information
from the oifits file, and a set of functions, cp_tools, for manipulating
these data and fitting models.
Fitting is based on the MCMC Hammer algorithm (aka ensemble affine
invariant MCMC) or the MultiNest algorithm (aka multimodal nested
sampling). Both of these must be installed correctly or else
pymask won't work!
See readme.txt for more details.
- Ben
---
-------------------------------------------------------------------- '''
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pyfits as pf
import copy
import pickle
import os
import sys
import pdb
import oifits
shift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
dtor = np.pi/180.0
import cp_tools
from cp_tools import *
import cpo
from cpo import *
# -------------------------------------------------
# set some defaults to display images that will
# look more like the DS9 display....
# -------------------------------------------------
#plt.set_cmap(cm.gray)
(plt.rcParams)['image.origin'] = 'lower'
(plt.rcParams)['image.interpolation'] = 'nearest'
# -------------------------------------------------
plt.close()
| gpl-3.0 |
ky822/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
cainiaocome/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
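# Hedged addendum (editor's sketch, not part of the original example): the
# fitted KernelDensity estimator can also score points. score_samples returns
# the log-density of each sample under the model; here we score the first
# five PCA-projected digits purely as an illustration.
log_dens = kde.score_samples(data[:5])
print("log-density of first five samples: {0}".format(log_dens))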
| bsd-3-clause |
sarahgrogan/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
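# Hedged usage sketch (editor's note, not part of the original helper). The
# file names below are hypothetical; the call resizes the source image and
# centers it on a 400x280 white canvas:
#
#   make_thumbnail('auto_examples/images/plot_foo_001.png',
#                  'auto_examples/images/thumb/plot_foo.png', 400, 280)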
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
da1z/intellij-community | python/helpers/pydev/pydev_ipython/inputhook.py | 11 | 19160 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_QT5 = 'qt5'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version # @UndefinedVariable
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) # @UndefinedVariable
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp() # @UndefinedVariable
if app is None:
app = wx.App(redirect=False, clearSigInt=False) # @UndefinedVariable
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal-based applications like
        IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_qt5(self, app=None):
from pydev_ipython.inputhookqt5 import create_inputhook_qt5
app, inputhook_qt5 = create_inputhook_qt5(self, app)
self.set_inputhook(inputhook_qt5)
self._current_gui = GUI_QT5
app._in_event_loop = True
self._apps[GUI_QT5] = app
return app
def disable_qt5(self):
if GUI_QT5 in self._apps:
self._apps[GUI_QT5]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
            except ImportError:
# Python 3
import tkinter as _TK # @UnresolvedImport
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
        integrate with terminal-based applications like IPython. Due to GLUT
        limitations, it is currently not possible to start the event loop
        without first creating a window. You should therefore not create
        another window, but use the one that is created instead. See
        'gui-glut.py' in the docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
        This sets PyOS_InputHook to NULL, sets the display function to a
        dummy one, and sets the timer to a dummy timer that will be triggered
        very far in the future.
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
        pyglet to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal-based applications like
        IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call pyplot.pause, which updates and displays the active
        figure during the pause. This is not MacOSX-specific, but it lets us
        avoid input hooks with the native MacOSX backend.
        We also should not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the
        first time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_qt5 = inputhook_manager.enable_qt5
disable_qt5 = inputhook_manager.disable_qt5
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
        If None (or 'none'), clears the input hook; otherwise it must be one
        of the recognized GUI names (see the ``GUI_*`` constants in this module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_QT5: enable_qt5,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_QT5",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_qt5",
"disable_qt5",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| apache-2.0 |
Srisai85/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
    # Test that the NLS solver results are close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
    # same with sparseness: smoke test only, no assertion on the transformed values
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
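def test_initialize_shapes():
    # Editor's illustrative addition (not from the original test suite): the NNDSVD
    # initializer returns factor matrices with shapes (n_samples, n_components)
    # and (n_components, n_features).
    data = np.abs(random_state.randn(6, 4))
    W, H = nmf._initialize_nmf(data, 3)
    assert_true(W.shape == (6, 3) and H.shape == (3, 4))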
| bsd-3-clause |
pnedunuri/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of samples to
classify, and plot the time taken as a function of the number of
samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
julienmalard/Tikon | pruebas/test_central/rcrs/modelo_calib_mód.py | 1 | 2555 | import numpy as np
import pandas as pd
import xarray as xr
from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela, Coso
from tikon.central.res import Resultado
from tikon.datos.datos import Datos
from tikon.ecs import ÁrbolEcs, CategEc, EcuaciónVacía, SubcategEc, Ecuación, Parám
from tikon.ecs.aprioris import APrioriDens
from tikon.datos import Obs
from tikon.utils import EJE_TIEMPO, EJE_PARC, EJE_ESTOC
f_inic = '2000-01-01'
class A(Parám):
nombre = 'a'
unids = None
líms = (None, None)
apriori = APrioriDens((0, 3), .90)
eje_cosos = 'coso'
class EcuaciónParám(Ecuación):
nombre = 'ec'
eje_cosos = 'coso'
cls_ramas = [A]
_nombre_res = 'res'
def eval(símismo, paso, sim):
ant = símismo.obt_valor_res(sim)
n_estoc = len(ant.coords[EJE_ESTOC])
return ant + símismo.cf['a'] + Datos(
(np.random.random(n_estoc) - 0.5) * 0.1, coords={EJE_ESTOC: np.arange(n_estoc)}, dims=[EJE_ESTOC]
)
class SubCategParám(SubcategEc):
nombre = 'subcateg'
cls_ramas = [EcuaciónParám, EcuaciónVacía]
eje_cosos = 'coso'
_nombre_res = 'res'
class CategParám(CategEc):
nombre = 'categ'
cls_ramas = [SubCategParám]
eje_cosos = 'coso'
class EcsParám(ÁrbolEcs):
nombre = 'árbol'
cls_ramas = [CategParám]
class CosoParám(Coso):
def __init__(símismo, nombre):
super().__init__(nombre, EcsParám)
class Res(Resultado):
nombre = 'res'
unids = None
def __init__(símismo, sim, coords, vars_interés):
coords = {'coso': sim.ecs.cosos, **coords}
super().__init__(sim, coords, vars_interés)
class SimulMóduloValid(SimulMódulo):
resultados = [Res]
def incrementar(símismo, paso, f):
super().incrementar(paso, f)
class Módulo1(Módulo):
nombre = 'módulo'
cls_simul = SimulMóduloValid
cls_ecs = EcsParám
eje_coso = 'coso'
class Módulo2(Módulo):
nombre = 'módulo 2'
cls_simul = SimulMóduloValid
cls_ecs = EcsParám
eje_coso = 'coso'
coso1 = CosoParám('hola')
coso2 = CosoParám('salut')
class MiObs(Obs):
mód = 'módulo'
var = 'res'
obs = MiObs(
datos=xr.DataArray(
np.arange(10),
coords={EJE_TIEMPO: pd.date_range(f_inic, periods=10, freq='D')}, dims=[EJE_TIEMPO]
).expand_dims({EJE_PARC: ['parcela'], 'coso': [coso1]})
)
exper = Exper('exper', Parcela('parcela'), obs=obs)
módulo1 = Módulo1(coso1)
módulo2 = Módulo2(coso2)
modelo = Modelo([módulo1, módulo2])
| agpl-3.0 |
r-mart/scikit-learn | sklearn/mixture/gmm.py | 68 | 31091 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
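def _demo_log_multivariate_normal_density():
    """Illustrative sketch (editor's addition, not part of the scikit-learn API):
    evaluate the diagonal-covariance log-density of two points under two
    one-dimensional Gaussian components."""
    X = np.array([[0.0], [1.0]])
    means = np.array([[0.0], [5.0]])
    covars = np.array([[1.0], [2.0]])  # one variance per component ('diag')
    # The result has shape (n_samples, n_components) == (2, 2).
    return log_multivariate_normal_density(X, means, covars, covariance_type='diag')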
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
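def _demo_sample_gaussian():
    """Illustrative sketch (editor's addition, not part of the scikit-learn API):
    draw a few samples from a 2-D Gaussian with a full covariance matrix."""
    mean = np.array([0.0, 1.0])
    covar = np.array([[1.0, 0.3], [0.3, 2.0]])
    # The result has shape (n_features, n_samples) == (2, 5).
    return sample_gaussian(mean, covar, covariance_type='full',
                           n_samples=5, random_state=0)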
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        the prediction may not be 100% accurate when using a low number of iterations.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
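def _demo_gmm_fit():
    """Illustrative sketch (editor's addition, not part of the scikit-learn API):
    fit a two-component diagonal GMM to bimodal 1-D data, mirroring the class
    docstring example. The values quoted in the comment are approximate."""
    rng = np.random.RandomState(1)
    X = np.concatenate((rng.randn(100, 1), 10 + rng.randn(300, 1)))
    g = GMM(n_components=2, covariance_type='diag', random_state=0)
    g.fit(X)
    # The mixing weights come out close to [0.75, 0.25] (possibly in the other
    # order) and the means come out close to 10 and 0.
    return g.weights_, g.means_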
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
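def _demo_distribute_covar_matrix():
    """Illustrative sketch (editor's addition, not part of the scikit-learn API):
    expand a single (tied) covariance matrix into per-component covariances for
    the 'full' covariance type."""
    tied_cv = np.eye(3)
    cv = distribute_covar_matrix_to_match_covariance_type(tied_cv, 'full', 4)
    # cv has shape (4, 3, 3): one copy of the tied matrix per mixture component.
    return cv.shape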
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
alistairlow/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 10 | 18978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
  If the length of seq is less than the padded length of arr, fillvalue is used.
  Args:
    arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
    seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
    Tensor of the same type as the samples, padded with zeros to shape
    [batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
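def _demo_pad_if_needed():
  """Illustrative sketch (editor's addition, not part of the TensorFlow API):
  pad a ragged batch of 1-D numpy arrays to the length of the longest element."""
  batch = [np.array([1, 2, 3]), np.array([4]), np.array([5, 6])]
  # The result has shape (3, 3); shorter rows are filled with the fillvalue (0).
  return _pad_if_needed(batch, fillvalue=0)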
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
    current_epoch: Integer, the number of epochs that have already been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
  # We might have emitted more data than the requested number of epochs; trim it.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
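def _demo_next_batch_indices():
  """Illustrative sketch (editor's addition, not part of the TensorFlow API):
  show how batch indices wrap around the array and how the epoch count advances."""
  # Starting at index 3 of a length-5 array with batch_size 4, the batch wraps
  # around: indices == [3, 4, 0, 1], and the epoch counter advances to 1 when
  # the epoch_end index (4) is crossed.
  return _get_integer_indices_for_next_batch(
      batch_indices_start=3, batch_size=4, epoch_end=4,
      array_length=5, current_epoch=0, total_epochs=None)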
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
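def _demo_array_feed_fn():
  """Illustrative sketch (editor's addition, not part of the TensorFlow API):
  _ArrayFeedFn only uses the placeholders as dictionary keys, so plain strings
  stand in for real TF placeholders here."""
  data = np.arange(10).reshape(5, 2)
  feed_fn = _ArrayFeedFn(placeholders=['index', 'value'], array=data,
                         batch_size=2, num_epochs=1)
  # The first call returns {'index': [0, 1], 'value': array([[0, 1], [2, 3]])}.
  return feed_fn()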
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())}
else:
feed_dict = {key: np.asarray(item)
for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None,
pad_value=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
    data: a numpy `ndarray`, an `OrderedDict` of numpy arrays, a pandas
      `DataFrame`, or a generator yielding `dict`s of numpy arrays; its rows
      will be read into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
NotImplementedError: padding and shuffling data at the same time.
NotImplementedError: padding usage with non generator data type.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
pad_data = pad_value is not None
if pad_data and get_feed_fn is not _GeneratorFeedFn:
raise NotImplementedError(
"padding is only available with generator usage")
if shuffle and pad_data:
raise NotImplementedError(
"padding and shuffling data at the same time is not implemented")
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
elif pad_data:
min_after_dequeue = 0 # just for the summary text
queue_shapes = list(map(
lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
queue_shapes))
queue = data_flow_ops.PaddingFIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
if not pad_data:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
else:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs,
pad_value=pad_value))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/probabilistic_graphical_model/gpflow/gpflow_basic.py | 2 | 3080 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import matplotlib
import gpflow
# REF [site] >> https://gpflow.readthedocs.io/en/latest/notebooks/models.html
def handle_model_example():
with gpflow.defer_build():
X = np.random.rand(20, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(20,1) * 0.01
model = gpflow.models.GPR(X, Y, kern=gpflow.kernels.Matern32(1) + gpflow.kernels.Linear(1))
# View, get and set parameters.
print(model)
print(model.as_pandas_table())
print(model.likelihood.as_pandas_table())
model.kern.kernels[0].lengthscales = 0.5
model.likelihood.variance = 0.01
print(model.as_pandas_table())
# Constraints and trainable variables.
print(model.read_trainables())
model.kern.kernels[0].lengthscales.transform = gpflow.transforms.Exp()
print(model.read_trainables())
#model.kern.kernels[1].variance.trainable = False
#print(model.as_pandas_table())
#print(model.read_trainables())
# Priors.
model.kern.kernels[0].variance.prior = gpflow.priors.Gamma(2, 3)
print(model.as_pandas_table())
# Optimization.
model.compile()
opt = gpflow.train.ScipyOptimizer()
opt.minimize(model)
class LinearMulticlass(gpflow.models.Model):
def __init__(self, X, Y, name=None):
super().__init__(name=name)
self.X = X.copy() # X is a numpy array of inputs.
self.Y = Y.copy() # Y is a 1-of-K representation of the labels.
self.num_data, self.input_dim = X.shape
_, self.num_classes = Y.shape
# Make some parameters.
self.W = gpflow.Param(np.random.randn(self.input_dim, self.num_classes))
self.b = gpflow.Param(np.random.randn(self.num_classes))
@gpflow.params_as_tensors
def _build_likelihood(self):
        # With @gpflow.params_as_tensors, Param variables behave as TensorFlow tensors here.
p = tf.nn.softmax(tf.matmul(self.X, self.W) + self.b)
return tf.reduce_sum(tf.log(p) * self.Y)
# REF [site] >> https://gpflow.readthedocs.io/en/latest/notebooks/models.html
def build_new_model_example():
#%matplotlib inline
plt.style.use('ggplot')
X = np.vstack([
np.random.randn(10, 2) + [2, 2],
np.random.randn(10, 2) +[-2, 2],
np.random.randn(10, 2) +[2, -2]
])
Y = np.repeat(np.eye(3), 10, 0)
matplotlib.rcParams['figure.figsize'] = (12, 6)
plt.scatter(X[:,0], X[:,1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
model = LinearMulticlass(X, Y)
print(model.as_pandas_table())
opt = gpflow.train.ScipyOptimizer()
opt.minimize(model)
print(model.as_pandas_table())
xx, yy = np.mgrid[-4:4:200j, -4:4:200j]
X_test = np.vstack([xx.flatten(), yy.flatten()]).T
f_test = np.dot(X_test, model.W.read_value()) + model.b.read_value()
p_test = np.exp(f_test)
p_test /= p_test.sum(1)[:,None]
for i in range(3):
plt.contour(xx, yy, p_test[:,i].reshape(200, 200), [0.5], colors='k', linewidths=1)
plt.scatter(X[:,0], X[:,1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
def main():
#handle_model_example()
build_new_model_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
Manewing/orbs | scripts/plotstats.py | 1 | 1595 | #!/usr/bin/python3
import argparse
import matplotlib.pyplot as plt
def plotOrbsStats(stats_dir):
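    # The .stats files are assumed to be flat binary streams of 4-byte
    # little-endian unsigned integers, one sample per entry.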
f = open(stats_dir + "/orbs.stats", "rb")
try:
data = f.read(4)
x = []
while data:
i = int.from_bytes(data, byteorder="little")
x.append(i)
data = f.read(4)
finally:
f.close()
plt.plot(x)
plt.title("Number of orbs")
plt.show()
def plotAvgLifeTimeStats(stats_dir):
f = open(stats_dir + "/avg_life_time.stats", "rb")
try:
data = f.read(4)
x = []
while data:
i = int.from_bytes(data, byteorder="little")
x.append(i)
data = f.read(4)
finally:
f.close()
plt.plot(x)
plt.title("Average life time")
plt.show()
def plotAvgInstrUsageStats(stats_dir):
f = open(stats_dir + "/avg_instr_usage.stats", "rb")
try:
data = f.read(4)
x = []
while data:
i = int.from_bytes(data, byteorder="little") / 1000.0
x.append(i)
data = f.read(4)
finally:
f.close()
plt.plot(x)
plt.title("Average instruction usage")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--stats", required=True,
help="Directory with statistics files")
args = parser.parse_args()
    # plot statistics (each plotting helper already calls plt.show() itself)
    plotOrbsStats(args.stats)
    plotAvgLifeTimeStats(args.stats)
    plotAvgInstrUsageStats(args.stats)
| mit |
ibis-project/ibis | ibis/backends/parquet/tests/test_schema.py | 1 | 2910 | import tempfile
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.expr.datatypes as dt
pa = pytest.importorskip('pyarrow')
pq = pytest.importorskip('pyarrow.parquet')
@pytest.fixture
def parquet_schema():
np.random.seed(0)
size = 100
df = pd.DataFrame(
{
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Test other timestamp resolutions now that arrow
# supports them
'datetime': np.arange(
"2016-01-01T00:00:00.001", size, dtype='datetime64[ms]'
),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None]
+ [str(x) for x in range(size - 2)]
+ [None],
'empty_str': [''] * size,
'bytes': [b'foo'] * size,
},
columns=[
'uint8',
'uint16',
'uint32',
'uint64',
'int8',
'int16',
'int32',
'int64',
'float32',
'float64',
'bool',
'datetime',
'str',
'str_with_nulls',
'empty_str',
'bytes',
],
)
with tempfile.TemporaryFile() as path:
table = pa.Table.from_pandas(df)
pq.write_table(table, path)
parquet_file = pq.ParquetFile(path)
return parquet_file.schema
def test_convert_parquet(parquet_schema):
strings = [dt.string, dt.string, dt.string]
# uint32, int8, int16 stored as upcasted types
types = (
[
dt.uint8,
dt.uint16,
dt.int64,
dt.uint64,
dt.int16,
dt.int16,
dt.int32,
dt.int64,
dt.float32,
dt.float64,
dt.boolean,
dt.timestamp,
]
+ strings
+ [dt.binary, dt.int64]
)
names = [
'uint8',
'uint16',
'uint32',
'uint64',
'int8',
'int16',
'int32',
'int64',
'float32',
'float64',
'bool',
'datetime',
'str',
'str_with_nulls',
'empty_str',
'bytes',
]
expected = ibis.schema(zip(names, types))
result = ibis.infer_schema(parquet_schema)
assert result == expected
| apache-2.0 |
bmazin/ARCONS-pipeline | util/pixelExplorer.py | 1 | 35902 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cm
import numpy as np
import sys
import os
import tables
from scipy.stats import chi2
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
from functools import partial
from util.ObsFile import ObsFile
from util.FileName import FileName
from util.readDict import readDict
from util.popup import PopUp,onscroll_cbar,onclick_cbar
#from hotpix import hotPixelsMatt as hotPixels
from hotpix import hotPixels
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Pixel Explorer')
paramFile = sys.argv[1]
self.params = readDict()
self.params.read_from_file(paramFile)
self.createMainFrame()
self.createStatusBar()
def __del__(self):
for stackLabel in self.stackObsFileLists:
for obs in self.stackObsFileLists[stackLabel]:
try:
obs.close()
except:
pass
def createMainFrame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
self.dpi = 100
self.fig = Figure((7.0, 7.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.axes0 = self.fig.add_subplot(111)
cid=self.canvas.mpl_connect('button_press_event', self.clickCanvas)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def createStatusBar(self):
self.status_text = QLabel("Awaiting orders.")
self.statusBar().addWidget(self.status_text, 1)
def loadStacks(self):
#make a dictionary to put lists of ObsFile objects into
self.stackObsFileLists = dict()
self.stackWvlCals = dict()
for stackLabel in ['obs','sky','twilight']:
self.loadObs(stackLabel)
#Unload the lists of ObsFile objects into individual lists
self.obList = self.stackObsFileLists['obs']
self.skyObList = self.stackObsFileLists['sky']
self.twilightObList = self.stackObsFileLists['twilight']
self.cal = self.stackWvlCals['obs']
#If there isn't already an npz holding the obs stack spectra
#or if the params say to remake the npz regardless, then make the stack npz file
if os.path.exists(self.params['obsStackFileName'])==False or self.params['makeObsStackFile'] == True:
self.createStack('obs')
if os.path.exists(self.params['rawStackFileName'])==False or self.params['makeRawObsStackFile'] == True:
self.createStack('raw')
if os.path.exists(self.params['skyStackFileName'])==False or self.params['makeSkyStackFile'] == True:
self.createStack('sky')
if os.path.exists(self.params['twilightStackFileName'])==False or self.params['makeTwilightStackFile'] == True:
self.createStack('twilight')
#Now load all the info from the files, whether made previously or just now
stackFileName = self.params['obsStackFileName']
data = np.load(stackFileName)
self.spectra = data['spectra']
self.frame = data['frame']
#self.obsTotalIntTime = data['totalIntTime']
self.wvlBinEdges = data['wvlBinEdges']
self.wvlBinWidths = np.diff(self.wvlBinEdges)
self.nRow,self.nCol,self.nWvlBins = np.shape(self.spectra)
self.intTime = self.params['obsIntTime']
stackFileName = self.params['rawStackFileName']
data = np.load(stackFileName)
self.rawSpectra = data['spectra']
self.rawFrame = data['frame']
#self.rawTotalIntTime = data['totalIntTime']
stackFileName = self.params['skyStackFileName']
data = np.load(stackFileName)
self.skySpectra = data['spectra']
self.skyFrame = data['frame']
#self.skyTotalIntTime = data['totalIntTime']
self.skyIntTime = self.params['skyIntTime']
stackFileName = self.params['twilightStackFileName']
data = np.load(stackFileName)
self.twilightSpectra = data['spectra']
self.twilightFrame = data['frame']
#self.twilightTotalIntTime = data['totalIntTime']
self.twilightIntTime = self.params['twilightIntTime']
#Load flat info for the current obs stack
flatSolnFileName = FileName(run=self.params['run'],date=self.params['obsFlatCalSunsetDate'],tstamp=self.params['obsFlatCalTimestamp']).flatSoln()
flatInfoFileName = os.path.splitext(flatSolnFileName)[0]+'.npz'
self.flatInfo= np.load(flatInfoFileName)
#Load flat info for all flats specified in params file
flats = self.params['flatInfoFiles']
self.flatInfos = []
for flat in flats:
flatSolnFileName = FileName(run=self.params['run'],date=flat,tstamp='').flatSoln()
flatInfoFileName = os.path.splitext(flatSolnFileName)[0]+'.npz'
self.flatInfos.append(np.load(flatInfoFileName))
def prepareForClickPlots(self):
matplotlib.rcParams['font.size'] = 10
#create wavelength array more coarsely binned for use in showPixelWvlLightCurves and similar
self.rebinSpecBins = self.params['nWvlBands']
self.firstAfterConvolve = self.rebinSpecBins//2
self.rebinnedWvlEdges = self.wvlBinEdges[::self.rebinSpecBins]
self.averageTwilightSpectrum = np.zeros(self.nWvlBins)
spectra2d = np.reshape(self.twilightSpectra,[self.nRow*self.nCol,self.nWvlBins ])
fractionOfPixelsToTrim = .1
for iWvl in xrange(self.nWvlBins):
spectrum = spectra2d[:,iWvl]
goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians
goodSpectrum = np.sort(goodSpectrum)
nGoodPixels = len(goodSpectrum)
            trimmedSpectrum = goodSpectrum[int(fractionOfPixelsToTrim*nGoodPixels):int((1-fractionOfPixelsToTrim)*nGoodPixels)]
self.averageTwilightSpectrum[iWvl] = np.mean(trimmedSpectrum)
def loadObs(self,stackLabel):
timestampList = self.params[stackLabel+'Sequence']
run = self.params['run']
sunsetDate = self.params[stackLabel+'SunsetDate']
utcDate = self.params[stackLabel+'UtcDate']
intTime = self.params[stackLabel+'IntTime']
wvlLowerCutoff = self.params[stackLabel+'WvlLowerCutoff']
wvlUpperCutoff = self.params[stackLabel+'WvlUpperCutoff']
calTimestamp = self.params[stackLabel+'WvlTimestamp']
print stackLabel,calTimestamp
wvlSolnFileName = FileName(run=run,date=sunsetDate,tstamp=calTimestamp).calSoln()
wvlCalFileName = FileName(run=run,date=self.params[stackLabel+'WvlSunsetDate'],tstamp=calTimestamp).cal()
flatSolnFileName = FileName(run=run,date=self.params[stackLabel+'FlatCalSunsetDate'],tstamp=self.params[stackLabel+'FlatCalTimestamp']).flatSoln()
obsFileNames = [FileName(run=run,date=sunsetDate,tstamp=timestamp).obs() for timestamp in timestampList]
obList = [ObsFile(obsFn) for obsFn in obsFileNames]
for ob in obList:
ob.loadWvlCalFile(wvlSolnFileName)
ob.loadFlatCalFile(flatSolnFileName)
self.stackObsFileLists[stackLabel] = obList
cal = ObsFile(wvlCalFileName)
cal.loadWvlCalFile(wvlSolnFileName)
cal.loadFlatCalFile(flatSolnFileName)
self.stackWvlCals[stackLabel] = cal
def createStack(self,stackLabel):
paramsLabel = stackLabel
weighted = True
if stackLabel == 'raw':
paramsLabel = 'obs'
weighted = False
getRawCounts = True
x = self.stackObsFileLists[paramsLabel][0].getSpectralCube(weighted=weighted)
spectra = x['cube']
wvlBinEdges = x['wvlBinEdges']
totalIntTime = 0
for ob in self.stackObsFileLists[paramsLabel][1:]:
print ob.fileName
x = ob.getSpectralCube(weighted=weighted)
cube = x['cube']
wvlBinEdges = x['wvlBinEdges']
spectra += cube
totalIntTime += ob.getFromHeader('exptime')
spectra = np.array(spectra,dtype=np.float64)
frame = np.sum(spectra,axis=2)
hotPixMask = hotPixels.checkInterval(image=frame)['mask']
frame[hotPixMask != 0] = np.nan
frame[frame == 0] = np.nan
stackFileName = self.params[stackLabel+'StackFileName']
np.savez(stackFileName,spectra=spectra,frame=frame,wvlBinEdges=wvlBinEdges,totalIntTime=totalIntTime)
def plotWeightedImage(self):
self.showFrame = np.array(self.frame)
self.showFrame[np.isnan(self.frame)] = 0
handleMatshow = self.axes0.matshow(self.showFrame,cmap=matplotlib.cm.gnuplot2,origin='lower',vmax=np.mean(self.showFrame)+3*np.std(self.showFrame))
self.fig.cbar = self.fig.colorbar(handleMatshow)
cid = self.fig.canvas.mpl_connect('scroll_event', partial(onscroll_cbar, self.fig))
cid = self.fig.canvas.mpl_connect('button_press_event', partial(onclick_cbar, self.fig))
def arrayPlots(self):
if self.params['showArrayRawImage']:
self.showArrayRawImage()
if self.params['showArrayStdVsIntTime']:
self.showArrayStdVsIntTime()
if self.params['showArrayWvlCalRange']:
self.showArrayWvlCalRange()
if self.params['showTwilightArrayImage']:
self.showTwilightArrayImage()
if self.params['showTwilightArrayStdVsFlux']:
self.showTwilightArrayStdVsFlux()
if self.params['showTwilightArrayReducedChisqImage']:
self.showTwilightArrayReducedChisqImage()
if self.params['showSkyArrayImage']:
self.showSkyArrayImage()
if self.params['showArrayLaserImage']:
self.showArrayLaserImage()
def clickCanvas(self,event):
if event.inaxes is self.axes0:
col = round(event.xdata)
row = round(event.ydata)
print '(%d,%d)'%(row,col)
if self.params['showPixelSpectrum'] or self.params['showPixelRawSpectrum']:
self.showPixelSpectrum(row,col)
if self.params['showPixelLightCurve']:
self.showPixelLightCurve(row,col)
if self.params['showPixelWvlLightCurves']:
self.showPixelWvlLightCurves(row,col)
if self.params['showPixelRawPhaseHist']:
self.showPixelRawPhaseHist(row,col)
if self.params['showPixelRawBaselineHist']:
self.showPixelRawBaselineHist(row,col)
if self.params['showPixelRawPeakHist']:
self.showPixelRawPeakHist(row,col)
if self.params['showPixelFlatWeights']:
self.showPixelFlatWeights(row,col)
if self.params['showPixelLaserSpectrum']:
self.showPixelLaserSpectrum(row,col)
if self.params['showTwilightPixelSpectrum']:
self.showTwilightPixelSpectrum(row,col)
if self.params['showTwilightPixelStdVsFlux']:
self.showTwilightPixelStdVsFlux(row,col)
if self.params['showTwilightPixelDeviationFromMedian']:
self.showTwilightPixelDeviationFromMedian(row,col)
if self.params['showPixelStdVsIntTime']:
self.showPixelStdVsIntTime(row,col)
def showArrayRawImage(self):
output = self.obList[0].getPixelCountImage(getRawCount=True,weighted=False)
frame = output['image']
for ob in self.obList[1:]:
output = ob.getPixelCountImage(getRawCount=True,weighted=False)
frame += output['image']
PopUp(parent=self,title='showArrayRawImage').plotArray(image=frame,title='raw image')
def showArrayStdVsIntTime(self):
intTimes = [1,2,3,5,10,15,30]
sdevVsIntTime = []
madVsIntTime = []
medVsIntTime = []
for intTime in intTimes:
image = np.zeros((self.nRow,self.nCol))
for iOb,ob in enumerate(self.skyObList):
x = ob.getPixelCountImage(firstSec=0,integrationTime=intTime)
image+=x['image']
hotPixMask = hotPixels.checkInterval(image=image)['mask']
image[hotPixMask!=0]=0
countList = image[image!=0]
sdevVsIntTime.append(np.std(countList))
madVsIntTime.append(np.median(np.abs(countList-np.median(countList))))
medVsIntTime.append(np.median(countList))
PopUp(parent=self).plotArray(image,title=r'%d std=%f mad=%f med=%f'%(intTime,sdevVsIntTime[-1],madVsIntTime[-1],medVsIntTime[-1]))
medVsIntTime = np.array(medVsIntTime)
sqrtNVsIntTime = np.sqrt(medVsIntTime)
pop = PopUp(parent=self,title='showArrayStdVsIntTime')
pop.axes.set_xlabel('integration time (s)')
pop.axes.set_ylabel('$\sigma$')
        pop.axes.plot(intTimes,sqrtNVsIntTime,'k--',label=r'$\sqrt{med(N)}$')
pop.axes.plot(intTimes,sdevVsIntTime,'k')
pop.axes.plot(intTimes,madVsIntTime,'r')
pop.draw()
def showTwilightArrayImage(self):
image = self.twilightFrame
image[np.isnan(image)] = 0
#self.popUpArray(image=image,title='twilight image')
PopUp(parent=self,title='showTwilightArrayImage').plotArray(image=image,title='Twilight Image')
def showTwilightArrayStdVsFlux(self):
pass
def showTwilightArrayReducedChisqImage(self):
chisqImage = np.zeros((self.nRow,self.nCol))
nDeltaFromZero = np.zeros((self.nRow,self.nCol,self.nWvlBins))
for iRow in range(self.nRow):
for iCol in range(self.nCol):
x = self.getChisq(iRow,iCol)
chisqImage[iRow,iCol] = x['reducedChisq']
nDeltaFromZero[iRow,iCol,:] = x['nDeltaFromZero']
chisqImage[np.isnan(chisqImage)]=0
chisqImage[chisqImage == np.inf]=0
nDeltaFromZero = np.ma.array(nDeltaFromZero,mask=np.logical_and(np.isnan(nDeltaFromZero),nDeltaFromZero==np.inf))
hotPixMask = hotPixels.checkInterval(image=chisqImage)['mask']
chisqImage[hotPixMask != 0] = 0
#self.popUpArray(image=chisqImage,title='Flat Cal $\chi^{2}_{red}$',normNSigma=1.)
PopUp(parent=self,title='showTwilightArrayReducedChisqImage').plotArray(image=chisqImage,title='Flat Cal $\chi^{2}_{red}$',normNSigma=1.)
#PopUp(parent=self,title='showTwilightArrayNDeltaFromZero').plotArray(image=nDeltaFromZero,title='Flat Cal n$\sigma$ from 0',normNSigma=3.)
# verbose = True
# pdfFullPath='/Scratch/flatCalSolnFiles2/nDeltaWvlSlicesTwiAppliedToSky.pdf'
# pp = PdfPages(pdfFullPath)
# nPlotsPerRow = 3
# nPlotsPerCol = 4
# nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
# iPlot = 0
# if verbose:
# print 'plotting weights in wavelength sliced images'
#
# #matplotlib.rcParams['font.size'] = 4
# wvls = self.wvlBinEdges[0:-1]
#
# for iWvl,wvl in enumerate(wvls):
# if verbose:
# print 'wvl ',iWvl
# if iPlot % nPlotsPerPage == 0:
# fig = plt.figure(figsize=(10,10),dpi=100)
#
# ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
# ax.set_title(r'%.0f $\AA$'%wvl)
# image = nDeltaFromZero[:,:,iWvl]
#
# cmap = matplotlib.cm.gnuplot2
# cmap.set_bad('.1')
#
# handleMatshow = ax.matshow(image,cmap=cmap,origin='lower',vmin=-10.,vmax=10.)
# cbar = fig.colorbar(handleMatshow)
#
# if iPlot%nPlotsPerPage == nPlotsPerPage-1:
# pp.savefig(fig)
# iPlot += 1
# pp.savefig(fig)
# pp.close()
def showSkyArrayImage(self):
image = self.skyFrame
image[np.isnan(image)] = 0
#self.popUpArray(image,title='sky image')
PopUp(parent=self,title='showSkyArrayImage').plotArray(image=image,title='Sky Image')
def showArrayWvlCalRange(self):
rangeTable = self.cal.wvlRangeTable[:,:,1]#Get upper limit from valid wvl ranges
#self.popUpArray(image=rangeTable,title=r'Upper Wavecal Limits ($\AA$)')
PopUp(parent=self,title='showArrayWvlCalRange').plotArray(image=rangeTable,title=r'Upper Wavecal Limits ($\AA$)')
def showPixelSpectrum(self,row,col):
spectrum = self.spectra[row,col]
binWidths = np.diff(self.wvlBinEdges)
if self.params['showPixelRawSpectrum']:
weights = self.flatInfo['weights'][row,col]
rawSpectrum = self.spectra[row,col]/weights
rawSpectrum/=binWidths
spectrum/=binWidths
pop = PopUp(parent=self,title='showPixelSpectrum')
pop.axes.step(self.wvlBinEdges[:-1],spectrum,label='calibrated',color='b',where='post')
if self.params['showPixelRawSpectrum']:
pop.axes.step(self.wvlBinEdges[:-1],rawSpectrum,label='raw',color='r',where='post')
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
pop.axes.set_ylabel(r'counts/$\AA$')
pop.axes.legend(loc='lower right')
pop.axes.set_title('spectrum (%d,%d)'%(row,col))
pop.draw()
def showPixelWvlLightCurves(self,row,col):
spectrumInTime = []
for iOb,ob in enumerate(self.obList):
for sec in range(0,ob.getFromHeader('exptime'),self.intTime):
x = ob.getPixelSpectrum(pixelRow=row,pixelCol=col,firstSec=sec,integrationTime=self.intTime,weighted=True)
spectrum = x['spectrum']
spectrum = np.convolve(spectrum,np.ones(self.rebinSpecBins),'same')[self.firstAfterConvolve::self.rebinSpecBins]
spectrumInTime.append(spectrum)
spectrumInTime = np.array(spectrumInTime)
nBins = np.shape(spectrumInTime)[1]
pop = PopUp(parent=self,title='showPixelWvlLightCurves')
#plot counts vs time for each wavelength bin
times=np.arange(len(spectrumInTime[:,0]))*self.intTime
for iBin in xrange(nBins):
pop.axes.plot(times,1.0*spectrumInTime[:,iBin]/self.intTime,
c=cm.jet((iBin+1.)/nBins),
label=r'%d-%d $\AA$'%(self.rebinnedWvlEdges[iBin],
self.rebinnedWvlEdges[iBin+1]))
pop.axes.set_xlabel('time (s)')
pop.axes.set_ylabel('cps')
#plot counts vs time summed over all wavelengths
pop.axes.legend(loc='upper right')
pop.axes.set_title('Light Curve by Band (%d,%d)'%(row,col))
pop.draw()
def showPixelLightCurve(self,row,col):
lightCurve = []
for iOb,ob in enumerate(self.obList):
for sec in range(0,ob.getFromHeader('exptime'),self.intTime):
x = ob.getPixelCount(iRow=row,iCol=col,firstSec=sec,integrationTime=self.intTime,weighted=True)
counts = x['counts']/self.intTime
lightCurve.append(counts)
pop = PopUp(parent=self,title='showPixelLightCurve')
times=np.arange(0,len(lightCurve)*self.intTime,self.intTime)
pop.axes.set_xlabel('time (s)')
pop.axes.set_ylabel('cps')
pop.axes.plot(times,lightCurve,c='k')
pop.axes.set_title('Light Curve (%d,%d)'%(row,col))
pop.draw()
def showArrayLaserImage(self):
getImageOutput = self.cal.getPixelCountImage(getRawCount=True,weighted=False)
frame = getImageOutput['image']
#self.popUpArray(image=self.rawFrame,title='raw image')
pop = PopUp(parent=self,title='showArrayLaserImage')
pop.plotArray(image=frame,title='laser cal raw image')
def showPixelLaserSpectrum(self,row,col):
#First plot the laser cal spectrum for this pixel to see if it's good
x = self.cal.getTimedPacketList(row,col)
phases=np.array(x['peakHeights'],dtype=np.double)-np.array(x['baselines'],dtype=np.double)
pop = PopUp(parent=self,title='showPixelLaserSpectrum')
        nBins = int(np.max(phases)-np.min(phases))
histPhases,binEdges = np.histogram(phases,bins=nBins)
lambdaBinEdges = self.cal.convertToWvl(binEdges,row,col,excludeBad=True)
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
if len(lambdaBinEdges)==0: #no wavecal for this pixel, so lambdaBinEdges came back empty
lambdaBinEdges = binEdges
pop.axes.set_xlabel('phase (ADU)')
pop.axes.step(lambdaBinEdges[:-1],histPhases,where='post',color='k')
pop.axes.set_ylabel('counts')
pop.axes.set_title('Raw Laser Cal Spectrum (%d,%d)'%(row,col))
wvlCalSigma = self.cal.wvlErrorTable[row,col]
xOffset = self.cal.wvlCalTable[row,col,0]
yOffset = self.cal.wvlCalTable[row,col,1]
amplitude = self.cal.wvlCalTable[row,col,2]
#energy(eV) = amplitude*(pulseHeight-xOffset)**2+yOffset
stackLabel = 'obs'
run = self.params['run']
sunsetDate = self.params[stackLabel+'SunsetDate']
calTimestamp = self.params[stackLabel+'WvlTimestamp']
wvlDriftFileName = FileName(run=run,date=sunsetDate,tstamp=calTimestamp).calDriftInfo()
wvlDriftFile = tables.openFile(wvlDriftFileName,mode='r')
wvlDriftInfo = wvlDriftFile.root.params_drift.driftparams.read()
wvlDriftFile.close()
driftEntry = wvlDriftInfo[np.logical_and(wvlDriftInfo['pixelrow']==row ,wvlDriftInfo['pixelcol']==col)][0]
#extract gaussianparams in first column of selected row
bluePhaseSigma=driftEntry[0][2]
bluePhaseAmp = driftEntry[0][1]
bluePhaseOffset = driftEntry[0][0]
redPhaseSigma=driftEntry[0][5]
redPhaseAmp = driftEntry[0][4]
redPhaseOffset = driftEntry[0][3]
        phases = np.linspace(np.min(phases),np.max(phases),int((nBins+1)*100))
        blueGaussFit = bluePhaseAmp*np.exp(-0.5*((phases-bluePhaseOffset)/bluePhaseSigma)**2)
        redGaussFit = redPhaseAmp*np.exp(-0.5*((phases-redPhaseOffset)/redPhaseSigma)**2)
wavelengths = self.cal.convertToWvl(phases,row,col)
if len(wavelengths)==0: #no wavecal for this pixel, so lambdaBinEdges came back empty
wavelengths=phases
pop.axes.plot(wavelengths,blueGaussFit,'b')
pop.axes.plot(wavelengths,redGaussFit,'r')
pop.draw()
def showPixelStdVsIntTime(self,row,col):
intTimes = [1,2,3,5,10,15,30]
spectrumVsIntTimeVsTime = []
for intTime in intTimes:
spectrumInTime = []
for iOb,ob in enumerate(self.skyObList):
for sec in range(0,ob.getFromHeader('exptime'),intTime):
x = ob.getPixelSpectrum(pixelRow=row,pixelCol=col,firstSec=sec,integrationTime=intTime,weighted=True)
spectrum = x['spectrum']
spectrum = np.convolve(spectrum,np.ones(self.rebinSpecBins),'same')[self.firstAfterConvolve::self.rebinSpecBins]
spectrumInTime.append(spectrum)
spectrumInTime = np.array(spectrumInTime)
spectrumVsIntTimeVsTime.append(spectrumInTime)
#resulting array indexed as
#spectrumVsIntTimeVsTime[iIntTime][iTimeChunk][iWvlBin]
#sum over wavelength for total counts
countsVsIntTimeVsTime = [np.sum(spectrumInTime,axis=1) for spectrumInTime in spectrumVsIntTimeVsTime]
#countsVsIntTimeVsTime[iIntTime][iTimeChunk]
countStds = [np.std(countsVsTime) for countsVsTime in countsVsIntTimeVsTime]
countStds = np.array(countStds)
countSqrts = [np.sqrt(np.median(countsVsTime)) for countsVsTime in countsVsIntTimeVsTime]
countSqrts = np.array(countSqrts)
spectrumStds = [np.std(spectrumVsTime,axis=0) for spectrumVsTime in spectrumVsIntTimeVsTime]
spectrumSqrts = [np.sqrt(np.median(spectrumVsTime,axis=0)) for spectrumVsTime in spectrumVsIntTimeVsTime]
spectrumStds = np.array(spectrumStds)
spectrumSqrts = np.array(spectrumSqrts)
pop = PopUp(parent=self,title='showPixelStdVsIntTime')
pop.axes.set_xlabel('integration time (s)')
pop.axes.set_ylabel('normalized $\sigma$')
pop.axes.plot(intTimes,countSqrts/np.max(countSqrts),'k--',
label=r'$\sqrt{N}$')
pop.axes.plot(intTimes,countStds/np.max(countSqrts),'k',
label=r'%d-%d $\AA$'%(self.rebinnedWvlEdges[0],self.rebinnedWvlEdges[-1]))
nBins = np.shape(spectrumStds)[1]
for iBin in xrange(nBins):
pop.axes.plot(intTimes,
spectrumStds[:,iBin]/np.max(spectrumSqrts[:,iBin]),
c=cm.jet((iBin+1.0)/nBins),
label=r'%d-%d $\AA$'%(self.rebinnedWvlEdges[iBin],
self.rebinnedWvlEdges[iBin+1]))
pop.axes.legend(loc='upper left')
pop.axes.set_title('Sky Pixel StdDev vs Int Time (%d,%d)'%(row,col))
pop.draw()
def showPixelRawPhaseHist(self,row,col):
phases = np.array([],dtype=np.double)
for iOb,ob in enumerate(self.obList):
x = ob.getTimedPacketList(row,col)
phases=np.append(phases,(np.array(x['peakHeights'],dtype=np.double)-np.array(x['baselines'],dtype=np.double)))
pop = PopUp(parent=self,title='showPixelRawPhaseHist')
        nBins = int(np.max(phases)-np.min(phases))
histPhases,binEdges = np.histogram(phases,bins=nBins)
pop.axes.step(binEdges[:-1],histPhases,where='post')
pop.axes.set_xlabel('peak-baseline')
pop.axes.set_title('Peaks-Baselines')
pop.draw()
def showPixelRawBaselineHist(self,row,col):
baselines = np.array([],dtype=np.double)
for iOb,ob in enumerate(self.obList):
x = ob.getTimedPacketList(row,col)
baselines=np.append(baselines,np.array(x['baselines'],dtype=np.double))
pop = PopUp(parent=self,title='showPixelRawBaselineHist')
        nBins = int(np.max(baselines)-np.min(baselines))
histBaselines,binEdges = np.histogram(baselines,bins=nBins)
pop.axes.step(binEdges[:-1],histBaselines,where='post')
pop.axes.set_xlabel('baseline')
pop.axes.set_title('Baselines')
pop.draw()
def showPixelRawPeakHist(self,row,col):
peaks = np.array([],dtype=np.double)
for iOb,ob in enumerate(self.obList):
x = ob.getTimedPacketList(row,col)
peaks = np.append(peaks,np.array(x['peakHeights'],dtype=np.double))
pop = PopUp(parent=self,title='showPixelRawPeakHist')
        nBins = int(np.max(peaks)-np.min(peaks))
histPeaks,binEdges = np.histogram(peaks,bins=nBins)
pop.axes.step(binEdges[:-1],histPeaks,where='post')
pop.axes.set_xlabel('peak')
pop.axes.set_title('Packet Peaks (No Baseline Subtracted)')
pop.draw()
def showPixelFlatWeights(self,row,col):
pop = PopUp(parent=self,title='showPixelFlatWeights')
for iFlat,flatInfo in enumerate(self.flatInfos):
weights = flatInfo['weights'][row,col]
flatSpectra = flatInfo['spectra'][row,col]
flatMedians = flatInfo['median']
deltaFlatSpectra = np.sqrt(flatSpectra)
deltaWeights = weights*deltaFlatSpectra/flatSpectra
color=cm.jet((iFlat+1.)/len(self.flatInfos))
wvlBinCenters = self.wvlBinEdges[:-1]+np.diff(self.wvlBinEdges)/2.
pop.axes.plot(self.wvlBinEdges[:-1],weights,linestyle='-',label=self.params['flatInfoFiles'][iFlat],color=color,)
            pop.axes.errorbar(wvlBinCenters,weights,linestyle='None',yerr=deltaWeights,color=color)
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
pop.axes.set_ylabel(r'Weights')
pop.axes.set_title('Flat Weights')
pop.axes.legend(loc='lower right')
pop.draw()
def showTwilightPixelSpectrum(self,row,col):
spectrum = self.twilightSpectra[row,col]
pop = PopUp(parent=self,title='showTwilightPixelSpectrum')
pop.axes.step(self.wvlBinEdges[:-1],spectrum,where='post')
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
pop.axes.set_ylabel(r'total counts')
pop.axes.set_title('twilight spectrum (%d,%d) '%(row,col))
pop.draw()
def showTwilightPixelStdVsFlux(self,row,col):
spectrumVsFluxVsTime = []
for iOb,ob in enumerate(self.twilightObList):
spectrumInTime = []
for sec in range(0,ob.getFromHeader('exptime'),self.twilightIntTime):
x = ob.getPixelSpectrum(pixelRow=row,pixelCol=col,firstSec=sec,integrationTime=self.twilightIntTime,weighted=True)
spectrum = x['spectrum']
binEdges = x['wvlBinEdges']
spectrum = np.convolve(spectrum,np.ones(self.rebinSpecBins),'same')[self.firstAfterConvolve::self.rebinSpecBins]
spectrumInTime.append(spectrum)
spectrumInTime = np.array(spectrumInTime)
spectrumVsFluxVsTime.append(spectrumInTime)
spectrumVsFluxVsTime = np.array(spectrumVsFluxVsTime)
#resulting array indexed as
#spectrumVsFluxVsTime[iOb][iTimeChunk][iWvlBin]
#sum over wavelength for total counts
countsVsFluxVsTime = [np.sum(spectrumInTime,axis=1) for spectrumInTime in spectrumVsFluxVsTime]
#countsVsFluxVsTime[iFlux][iTimeChunk]
countStds = [np.std(countsVsTime) for countsVsTime in countsVsFluxVsTime]
fluxes = [np.median(countsVsTime) for countsVsTime in countsVsFluxVsTime]
fluxes = np.array(fluxes)
countStds = np.array(countStds)
countSqrts = [np.sqrt(np.median(countsVsTime)) for countsVsTime in countsVsFluxVsTime]
countSqrts = np.array(countSqrts)
spectrumStds = [np.std(spectrumVsTime,axis=0) for spectrumVsTime in spectrumVsFluxVsTime]
spectrumSqrts = [np.sqrt(np.median(spectrumVsTime,axis=0)) for spectrumVsTime in spectrumVsFluxVsTime]
spectrumStds = np.array(spectrumStds)
spectrumSqrts = np.array(spectrumSqrts)
pop = PopUp(parent=self,title='showTwilightPixelStdVsFlux')
pop.axes.set_xlabel('median counts')
pop.axes.set_ylabel('normalized $\sigma$')
pop.axes.plot(fluxes,countSqrts/np.max(countSqrts),'k--',
label=r'$\sqrt{N}$')
pop.axes.plot(fluxes,countStds/np.max(countSqrts),'k',
label=r'%d-%d $\AA$'%(self.rebinnedWvlEdges[0],self.rebinnedWvlEdges[-1]))
nBins = np.shape(spectrumStds)[1]
for iBin in xrange(nBins):
pop.axes.plot(fluxes,
spectrumStds[:,iBin]/np.max(spectrumSqrts[:,iBin]),
c=cm.jet((iBin+1.0)/nBins),
label=r'%d-%d $\AA$'%(self.rebinnedWvlEdges[iBin],
self.rebinnedWvlEdges[iBin+1]))
pop.axes.legend(loc='upper left')
pop.axes.set_title('Normalized Standard Deviation vs Twilight Flux, (%d,%d)'%(row,col))
pop.draw()
def getChisq(self,row,col):
spectrum = self.twilightSpectra[row,col]
weights = self.flatInfo['weights'][row,col]
flatSpectra = self.flatInfo['spectra'][row,col]
flatMedians = self.flatInfo['median']
deltaFlatSpectra = np.sqrt(flatSpectra)
deltaWeights = self.flatInfo['deltaWeights'][row,col]#weights*deltaFlatSpectra/flatSpectra
poissonDeltaSpectra = np.sqrt(spectrum)
rawSpectrum = spectrum/weights
deltaRawSpectrum = np.sqrt(rawSpectrum)
deltaSpectra = spectrum*np.sqrt((deltaWeights/weights)**2+(deltaRawSpectrum/rawSpectrum)**2)
diffSpectrum = (spectrum-self.averageTwilightSpectrum)
percentDiffSpectrum = 100.* diffSpectrum/self.averageTwilightSpectrum
#deltaDiffSpectrum = np.sqrt(deltaSpectra**2+deltaSpectra2**2)
deltaDiffSpectrum = np.array(deltaSpectra)
#deltaDiffSpectrum[np.isnan(deltaDiffSpectrum)] = 0
deltaPercentDiffSpectrum = 100.*deltaDiffSpectrum/self.averageTwilightSpectrum
nDeltaFromZero = diffSpectrum/deltaDiffSpectrum
chisqSumTerms =diffSpectrum**2/deltaDiffSpectrum**2
chisqSumTerms = chisqSumTerms[~np.isnan(chisqSumTerms)]
chisq = np.sum(chisqSumTerms)
degreesOfFreedom=sum(~np.isnan(chisqSumTerms))-1
reducedChisq = chisq/degreesOfFreedom
return {'reducedChisq':reducedChisq,
'percentDiffSpectrum':percentDiffSpectrum,
'deltaPercentDiffSpectrum':deltaPercentDiffSpectrum,
'nDeltaFromZero':nDeltaFromZero,
'degreesOfFreedom':degreesOfFreedom,
'chisq':chisq}
def showTwilightPixelDeviationFromMedian(self,row,col):
x = self.getChisq(row,col)
reducedChisq = x['reducedChisq']
chisq = x['chisq']
percentDiffSpectrum = x['percentDiffSpectrum']
deltaPercentDiffSpectrum = x['deltaPercentDiffSpectrum']
nDeltaFromZero = x['nDeltaFromZero']
degreesOfFreedom = x['degreesOfFreedom']
print 'reduced chisq =',reducedChisq
print 'P-value =',1-chi2.cdf(chisq,degreesOfFreedom)
pop = PopUp(parent=self,title='showTwilightPixelDeviationFromMedian')
pop.axes.errorbar(self.wvlBinEdges[:-1],percentDiffSpectrum,linestyle='-',color='k',yerr=deltaPercentDiffSpectrum)
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
pop.axes.set_ylabel(r'percent difference')
pop.axes.plot(self.wvlBinEdges[:-1],len(self.wvlBinEdges[:-1])*[0],'gray')
axes2 = pop.axes.twinx()
axes2.plot(self.wvlBinEdges[:-1],nDeltaFromZero,'m',alpha=.7)
align_yaxis(pop.axes,0,axes2,0)
axes2.set_ylabel(r'(pixelSpectrum-avgSpectrum)/$\sigma$',color='m')
pop.axes.set_title('Deviation from Avg Spectrum (%d,%d)'%(row,col))
pop.draw()
weights = self.flatInfo['weights'][row,col]
pop = PopUp(parent=self,title='showTwilightPixelDeviationFromMedian')
pop.axes.step(self.wvlBinEdges[:-1],self.averageTwilightSpectrum/self.wvlBinWidths,'k',label='avg')
pop.axes.step(self.wvlBinEdges[:-1],self.twilightSpectra[row,col]/self.wvlBinWidths,'b',label='weighted')
pop.axes.step(self.wvlBinEdges[:-1],(self.twilightSpectra[row,col]/weights)/self.wvlBinWidths,'r',label='raw')
pop.axes.set_xlabel(r'$\lambda$ ($\AA$)')
pop.axes.set_ylabel(r'counts per $\AA$')
pop.axes.set_title('Twilight Spectrum (%d,%d)'%(row,col))
pop.axes.legend(loc='lower right')
pop.draw()
def align_yaxis(ax1, v1, ax2, v2):
"""
adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1
Taken from http://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
def main():
np.seterr(divide='ignore',invalid='ignore')
app = QApplication(sys.argv)
form = AppForm()
form.loadStacks()
form.prepareForClickPlots()
form.arrayPlots()
form.plotWeightedImage()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| gpl-2.0 |
lituan/tools | pairwise_align_parameters.py | 1 | 7324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
try different combinations of gap_open and gap_extend to see how alignment score and identity change
input: sequences in FASTA format, aligned in consecutive pairs (two records at a time)
output: per-substitution-matrix tables of the best score/identity and the full parameter grid
usage example
python pairwise_align_parameters.py example.fasta
"""
import os
import sys
import numpy as np
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
from Bio.SubsMat.MatrixInfo import *
from multiprocessing import Pool
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from matplotlib import cm
def align(seq1, seq2, matrix,matrix_name):
# matrix = matlist.blosum62
# gap_open = -10 # usual value
# gap_extend = -0.5 # usual value
OPEN_BEGIN,OPEN_END,OPEN_STEP = -20.5,-2.0,0.1
EXTEND_BEGIN,EXTEND_END,EXTEND_STEP = -2.0,-0.1,0.1
opens = np.arange(OPEN_BEGIN,OPEN_END,OPEN_STEP)
extends = np.arange(EXTEND_BEGIN,EXTEND_END,EXTEND_STEP)
gx = [[opens[i] for j in range(len(extends))] for i in range(len(opens))]
gy = [extends for i in range(len(opens))]
scores = []
identities = []
best_score = 0
best_score_c = ''
best_identity = 0
best_identity_c = ''
para = []
for gap_open in opens:
score_i = []
identity_i = []
for gap_extend in extends:
if gap_open > gap_extend:
score_i.append(0)
identity_i.append(0)
para.append([gap_open,gap_extend,0,0])
else:
alns = pairwise2.align.globalds(seq1, seq2, matrix, gap_open, gap_extend)
seqq1 = alns[0][0]
seqq2 = alns[0][1]
identity = [1 for i, s in enumerate(seqq1) if s == seqq2[i]]
identity = 1.0 * len(identity)/ len(seqq1)
score_i.append(alns[0][2])
identity_i.append(identity)
if alns[0][2] > best_score:
best_score = alns[0][2]
best_score_c = (gap_open,gap_extend)
if identity > best_identity:
best_identity = identity
best_identity_c = (gap_open,gap_extend)
para.append([gap_open,gap_extend,alns[0][2],identity])
scores.append(score_i)
identities.append(identity_i)
# plot scores
# X_OFFSET,Y_OFFSET,Z_OFFSET = 0.5,0.1,20
z_max = max([max([si for si in s ]) for s in scores])
z_min = min([min([si for si in s ]) for s in scores])
# if z_max > 0:
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# ax.plot_surface(gx,gy,scores,alpha=0.3)
# ax.contour(gx,gy,scores,zdir='z',offset=z_min-Z_OFFSET,cmap=cm.coolwarm)
# ax.contour(gx,gy,scores,zdir='x',offset=OPEN_BEGIN-X_OFFSET,cmap=cm.coolwarm)
# ax.contour(gx,gy,scores,zdir='y',offset=EXTEND_END+Y_OFFSET,cmap=cm.coolwarm)
# ax.set_xlim(OPEN_BEGIN-X_OFFSET,OPEN_END+X_OFFSET)
# ax.set_ylim(EXTEND_BEGIN-Y_OFFSET,EXTEND_END+Y_OFFSET)
# ax.set_zlim(z_min-Z_OFFSET,z_max+Z_OFFSET)
# # ax.plot_wireframe(gx,gy,scores)
# ax.set_xlabel('gap_open',labelpad=10)
# ax.set_ylabel('gap_extend',labelpad=10)
# ax.set_zlabel('score',labelpad=10)
# ax.set_title('Pairwise Alignment Score')
# fig.savefig(str(matrix_name)+'_align_score.png')
# plt.close()
# # plot identities
# X_OFFSET,Y_OFFSET,Z_OFFSET = 0.5,0.1,0.02
# z_max = max([max([si for si in s ]) for s in identities])
# z_min = min([min([si for si in s ]) for s in identities])
# if z_max > 0:
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# ax.plot_surface(gx,gy,identities,alpha=0.3)
# ax.contour(gx,gy,identities,zdir='z',offset=z_min-Z_OFFSET,cmap=cm.coolwarm)
# ax.contour(gx,gy,identities,zdir='x',offset=OPEN_BEGIN-X_OFFSET,cmap=cm.coolwarm)
# ax.contour(gx,gy,identities,zdir='y',offset=EXTEND_END+Y_OFFSET,cmap=cm.coolwarm)
# ax.set_xlim(OPEN_BEGIN-X_OFFSET,OPEN_END+X_OFFSET)
# ax.set_ylim(EXTEND_BEGIN-Y_OFFSET,EXTEND_END+Y_OFFSET)
# ax.set_zlim(z_min-Z_OFFSET,z_max+Z_OFFSET)
# # ax.plot_wireframe(gx,gy,scores)
# ax.set_xlabel('gap_open',labelpad=10)
# ax.set_ylabel('gap_extend',labelpad=10)
# ax.set_zlabel('identity',labelpad=10)
# ax.set_title('Pairwise Alignment Identity')
# fig.savefig(str(matrix_name)+'_align_identity.png')
# plt.close()
# if z_max > 0:
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.scatter(scores,identities)
# ax.set_xlabel('Score',labelpad=10)
# ax.set_ylabel('Identity',labelpad=10)
# ax.set_title('Pairwise Alignment Identity and Score')
# fig.savefig(str(matrix_name)+'_align_score_identity.png')
# plt.close()
return [best_score,best_score_c,best_identity,best_identity_c,para]
def readfa(fa_f):
    # read in seqs in fasta format
    # seqs format: [(pro, seq), ...], e.g. [('seq1', 'MKT...'), ('seq2', 'MKV...')]
lines = fa_f.readlines()
lines = [line.rstrip('\r\n') for line in lines]
pro_line_num = [i for i, line in enumerate(
lines) if '>' in line] + [len(lines)]
seqs = [lines[n:pro_line_num[i + 1]]
for i, n in enumerate(pro_line_num[:-1])]
seqs = [(seq[0][1:], ''.join(seq[1:])) for seq in seqs]
return seqs
def single_align(p):
seq1,seq2,m = p
matrix = eval(m)
matrix_name = str(m)
try:
result = align(seq1,seq2,matrix,matrix_name) + [matrix_name]
return result
except:
return ''
# pa.append(result)
def main():
with open(sys.argv[-1]) as fa_f:
seqs = readfa(fa_f)
seqlen = len(seqs)
for i in range(0,len(seqs),2):
print seqs[i][1]
print seqs[i+1][1]
pa = []
# for m in matlist.available_matrices:
matrix_num = len(matlist.available_matrices)
parameters = [[seqs[i][1],seqs[i+1][1],m] for m in matlist.available_matrices]
p = Pool(16)
pa = p.map(single_align,parameters)
p.close()
# for m in matlist.available_matrices:
# matrix = eval(m)
# matrix_name = str(m)
# print i
# result = align(seqs[i][1],seqs[i+1][1],matrix,matrix_name) + [matrix_name]
# pa.append(result)
with open(str(i)+'_pa.txt','w') as w_f:
print >> w_f,'{0:<12}{1:<12}{2:<15}{3:<15}{4:<12}{5:<12}{6:<}'.format('best_score','gap_open','gap_extend','best_identity','gap_open','gap_extend','matrix')
for p in pa:
if p:
bs,bsc,bi,bic,para,mn = p
print >> w_f,'{0:<12}{1:<12.2f}{2:<15.2f}{3:<15.2f}{4:<12.2f}{5:<12.2f}{6:<}'.format(bs,bsc[0],bsc[1],bi,bic[0],bic[1],mn)
with open(str(i)+'_'+mn+'_para.txt','w') as para_f:
print >> para_f,'{0:<12}{1:<12}{2:<12}{3:<12}'.format('gap_open','gap_extend','score','identity')
for go,ge,sc,ie in para:
print >> para_f,'{0:<12.2f}{1:<12.2f}{2:<12}{3:<12}'.format(go,ge,sc,ie)
if __name__ == "__main__":
main()
| cc0-1.0 |
dstndstn/unwise-coadds | plot-tile.py | 1 | 1272 | import matplotlib
if __name__ == '__main__':
matplotlib.use('Agg')
import numpy as np
import pylab as plt
import os
from astrometry.util.fits import *
from astrometry.util.plotutils import *
from unwise_coadd import *
if __name__ == '__main__':
tile = '1336p666'
T = fits_table('unwise-%s-w3-frames.fits' % tile)
ps = PlotSequence('tile')
# approx
ra,dec = tile_to_radec(tile)
wcs = get_coadd_tile_wcs(ra, dec)
W,H = wcs.imagew, wcs.imageh
rr,dd = wcs.pixelxy2radec(np.array([1,W,W,1,1]),
np.array([1,1,H,H,1]))
print 'included:', np.sum(T.included)
T.use = (T.use == 1)
T.included = (T.included == 1)
print 'included:', len(np.flatnonzero(T.included))
plt.clf()
plt.plot(T.ra, T.dec, 'r.')
I = (T.qual_frame == 0)
p1 = plt.plot(T.ra[I], T.dec[I], 'mx')
p2 = plt.plot(T.ra[T.use], T.dec[T.use], 'b.')
I = (T.npixrchi > (T.npixoverlap * 0.01))
p3 = plt.plot(T.ra[I], T.dec[I], 'r+')
I = T.included
p4 = plt.plot(T.ra[I], T.dec[I], 'go')
plt.plot(rr, dd, 'k-')
plt.figlegend((p[0] for p in (p1,p2,p3,p4)),
('Bad qual_frame', 'Used', 'Bad rchi', 'Included'),
loc='upper right')
ps.savefig()
| gpl-2.0 |
kyleabeauchamp/DBayes | dbayes/water_lib.py | 1 | 4943 | import os
import tempfile
import sys
import pymbar
import numpy as np
import pandas as pd
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
import mdtraj as md
import repex
ff = app.ForceField("tip3p.xml")
def build_top(box_edge=2.1 * u.nanometers, nonbondedMethod=app.CutoffPeriodic):
box = repex.testsystems.WaterBox(box_edge=box_edge, nonbondedMethod=nonbondedMethod)
system = box.system
positions = box.positions
n_atoms = len(box.positions)
top = []
bonds = []
    for i in range(n_atoms // 3):
j = 3 * i
top.append(dict(serial=(j + 1), name="O", element="O", resSeq=(i+1), resName="HOH", chainID=(i+1)))
top.append(dict(serial=(j + 2), name="H", element="H", resSeq=(i+1), resName="HOH", chainID=(i+1)))
top.append(dict(serial=(j + 3), name="H", element="H", resSeq=(i+1), resName="HOH", chainID=(i+1)))
bonds.append([j + 0, j + 1])
bonds.append([j + 0, j + 2])
top = pd.DataFrame(top)
bonds = np.array(bonds, dtype='int')
top = md.Topology.from_dataframe(top, bonds)
xyz = positions / u.nanometers
boxes = box.system.getDefaultPeriodicBoxVectors()
lengths = boxes[0][0] / u.nanometers * np.ones((1, 3))
angles = 90.0 * np.ones((1, 3))
traj = md.Trajectory(xyz, top, unitcell_lengths=lengths, unitcell_angles=angles)
mmtop = traj.top.to_openmm(traj=traj)
return traj, mmtop, system, box, positions
def find_forces(system):
for force in system.getForces():
if type(force) == mm.NonbondedForce:
nonbonded_force = force
if type(force) == mm.HarmonicBondForce:
bond_force = force
if type(force) == mm.HarmonicAngleForce:
angle_force = force
return bond_force, angle_force, nonbonded_force
def set_constraints(system, r0, theta):
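    # For a rigid water with O-H bond length r0 and H-O-H angle theta, the H-H
    # constraint distance follows from the isosceles H-O-H triangle:
    # r1 = 2 * r0 * sin(theta / 2).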
r1 = 2 * r0 * np.sin(theta / 2.)
n_constraints = system.getNumConstraints()
    n_water = n_constraints // 3
for i in range(n_water * 2):
a0, a1, d = system.getConstraintParameters(i)
system.setConstraintParameters(i, a0, a1, r0 * u.nanometers)
for i in range(n_water * 2, n_water * 3):
a0, a1, d = system.getConstraintParameters(i)
system.setConstraintParameters(i, a0, a1, r1 * u.nanometers)
def set_nonbonded(f_nonbonded, qH, sigma, epsilon, sigmaH, epsilonH):
qO = -2.0 * qH
for k in range(f_nonbonded.getNumParticles()):
if k % 3 == 0:
f_nonbonded.setParticleParameters(k, qO * u.elementary_charge, sigma * u.nanometer, epsilon * u.kilojoule_per_mole)
else:
f_nonbonded.setParticleParameters(k, qH * u.elementary_charge, sigmaH * u.nanometer, epsilonH * u.kilojoule_per_mole)
def set_parms(system, qH, sigma, epsilon, sigmaH, epsilonH, r0, theta):
print("\nqH=%f, sigma=%f, epsilon=%f sigmaH=%f, epsilonH=%f, r0=%f, theta=%f\n" % (qH, sigma, epsilon, sigmaH, epsilonH, r0, theta))
f_bond, f_angle, f_nonbonded = find_forces(system)
set_constraints(system, r0, theta)
set_nonbonded(f_nonbonded, qH, sigma, epsilon, sigmaH, epsilonH)
def build(system, positions, mmtop, temperature, pressure, qH, sigma, epsilon, sigmaH, epsilonH, r0, theta, stderr_tolerance=0.05, n_steps=250000, nonbondedCutoff=1.1 * u.nanometer, output_frequency=250, print_frequency=None):
if print_frequency is None:
print_frequency = int(n_steps / 3.)
set_parms(system, qH, sigma, epsilon, sigmaH, epsilonH, r0, theta)
friction = 1.0 / u.picoseconds
timestep = 3.0 * u.femtoseconds
barostat_frequency = 25
path = tempfile.mkdtemp()
csv_filename = os.path.join(path, "density.csv")
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
system.addForce(mm.MonteCarloBarostat(pressure, temperature, barostat_frequency))
simulation = app.Simulation(mmtop, system, integrator)
simulation.reporters.append(app.StateDataReporter(sys.stdout, print_frequency, step=True, density=True))
simulation.reporters.append(app.StateDataReporter(csv_filename, output_frequency, density=True))
simulation.context.setPositions(positions)
print("minimizing")
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(temperature)
print("done minimizing")
converged = False
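    # Keep extending the trajectory until the standard error of the equilibrated
    # density (after discarding the burn-in detected by pymbar) falls below
    # stderr_tolerance.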
while not converged:
simulation.step(n_steps)
d = pd.read_csv(csv_filename, names=["Density"], skiprows=1)
density_ts = np.array(d.Density)
[t0, g, Neff] = pymbar.timeseries.detectEquilibration_fft(density_ts)
density_ts = density_ts[t0:]
density_mean_stderr = density_ts.std() / np.sqrt(Neff)
if density_mean_stderr < stderr_tolerance:
converged = True
print("temperature, density mean, stderr = %f, %f, %f" % (temperature / u.kelvin, density_ts.mean(), density_mean_stderr))
return d.Density.values
| gpl-2.0 |
postvakje/sympy | sympy/external/tests/test_importtools.py | 91 | 1215 | from sympy.external import import_module
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
| bsd-3-clause |
zfrenchee/pandas | asv_bench/benchmarks/strings.py | 2 | 3088 | from .pandas_vb_common import *
import string
import itertools as IT
import pandas.util.testing as testing
class StringMethods(object):
goal_time = 0.2
def make_series(self, letters, strlen, size):
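        # Cycle through `letters` one byte at a time, then reinterpret the byte
        # buffer as fixed-width strings of length `strlen`.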
return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))])
def setup(self):
self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000)
self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000)
self.s = self.make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|')
def time_cat(self):
self.many.str.cat(sep=',')
def time_center(self):
self.many.str.center(100)
def time_contains_few(self):
self.few.str.contains('matchthis')
def time_contains_few_noregex(self):
self.few.str.contains('matchthis', regex=False)
def time_contains_many(self):
self.many.str.contains('matchthis')
def time_contains_many_noregex(self):
self.many.str.contains('matchthis', regex=False)
def time_count(self):
self.many.str.count('matchthis')
def time_endswith(self):
self.many.str.endswith('matchthis')
def time_extract(self):
self.many.str.extract('(\\w*)matchthis(\\w*)')
def time_findall(self):
self.many.str.findall('[A-Z]+')
def time_get(self):
self.many.str.get(0)
def time_join_split(self):
self.many.str.join('--').str.split('--')
def time_join_split_expand(self):
self.many.str.join('--').str.split('--', expand=True)
def time_len(self):
self.many.str.len()
def time_match(self):
self.many.str.match('mat..this')
def time_pad(self):
self.many.str.pad(100, side='both')
def time_repeat(self):
self.many.str.repeat(list(IT.islice(IT.cycle(range(1, 4)), len(self.many))))
def time_replace(self):
self.many.str.replace('(matchthis)', '\x01\x01')
def time_slice(self):
self.many.str.slice(5, 15, 2)
def time_startswith(self):
self.many.str.startswith('matchthis')
def time_strip(self):
self.many.str.strip('matchthis')
def time_rstrip(self):
self.many.str.rstrip('matchthis')
def time_lstrip(self):
self.many.str.lstrip('matchthis')
def time_title(self):
self.many.str.title()
def time_upper(self):
self.many.str.upper()
def time_lower(self):
self.many.str.lower()
def time_get_dummies(self):
self.s.str.get_dummies('|')
class StringEncode(object):
goal_time = 0.2
def setup(self):
self.ser = Series(testing.makeUnicodeIndex())
def time_encode_decode(self):
self.ser.str.encode('utf-8').str.decode('utf-8')
class StringSlice(object):
goal_time = 0.2
def setup(self):
self.s = Series(['abcdefg', np.nan] * 500000)
def time_series_string_vector_slice(self):
# GH 2602
self.s.str[:5]
| bsd-3-clause |
heli522/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
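# A minimal usage sketch (illustrative, not part of the original module):
# a 3x3 grid with no mask gives a 9x9 adjacency matrix with 12 undirected
# neighbour pairs stored as 24 off-diagonal entries plus 9 diagonal entries.
#
#   A = grid_to_graph(3, 3)   # scipy.sparse.coo_matrix, shape (9, 9)
#   A.nnz                     # 33 = 24 + 9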
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
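# A minimal sketch (illustrative, not part of the original module): for a
# 10x10 image and 3x3 patches there are 8 * 8 = 64 possible patches, and a
# fractional max_patches caps that number proportionally.
#
#   _compute_n_patches(10, 10, 3, 3)                   # 64
#   _compute_n_patches(10, 10, 3, 3, max_patches=0.5)  # 32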
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
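# A minimal usage sketch (illustrative, not part of the original module):
# a (100, 100) array with patch_shape=8 and extraction_step=4 yields a
# strided view of shape (24, 24, 8, 8); reshaping the first two dimensions
# copies the data into an explicit stack of patches.
#
#   view = extract_patches(np.zeros((100, 100)), patch_shape=8, extraction_step=4)
#   view.shape                      # (24, 24, 8, 8)
#   view.reshape(-1, 8, 8).shape    # (576, 8, 8)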
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling when
        `max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
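# A minimal round-trip sketch (illustrative, not part of the original module):
# extracting every overlapping 2x2 patch of a 4x4 image and reconstructing
# reproduces the original exactly, because the per-pixel divisor above equals
# the number of patches covering that pixel.
#
#   one_image = np.arange(16).reshape((4, 4)).astype(float)
#   patches = extract_patches_2d(one_image, (2, 2))
#   np.allclose(reconstruct_from_patches_2d(patches, (4, 4)), one_image)  # True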
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
michaelaye/scikit-image | doc/examples/plot_holes_and_peaks.py | 15 | 2623 | """
===============================
Filling holes and finding peaks
===============================
In this example, we fill holes (i.e. isolated, dark spots) in an image using
morphological reconstruction by erosion. Erosion expands the minimal values of
the seed image until it encounters a mask image. Thus, the seed image and mask
image represent the maximum and minimum possible values of the reconstructed
image.
We start with an image containing both peaks and holes:
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import rescale_intensity
image = data.moon()
# Rescale image intensity so that we can see dim features.
image = rescale_intensity(image, in_range=(50, 200))
# convenience function for plotting images
def imshow(image, title, **kwargs):
fig, ax = plt.subplots(figsize=(5, 4))
ax.imshow(image, **kwargs)
ax.axis('off')
ax.set_title(title)
imshow(image, 'Original image')
"""
.. image:: PLOT2RST.current_figure
Now we need to create the seed image, where the minima represent the starting
points for erosion. To fill holes, we initialize the seed image to the maximum
value of the original image. Along the borders, however, we use the original
values of the image. These border pixels will be the starting points for the
erosion process. We then limit the erosion by setting the mask to the values
of the original image.
"""
import numpy as np
from skimage.morphology import reconstruction
seed = np.copy(image)
seed[1:-1, 1:-1] = image.max()
mask = image
filled = reconstruction(seed, mask, method='erosion')
imshow(filled, 'after filling holes', vmin=image.min(), vmax=image.max())
"""
.. image:: PLOT2RST.current_figure
As shown above, eroding inward from the edges removes holes, since (by
definition) holes are surrounded by pixels of brighter value. Finally, we can
isolate the dark regions by subtracting the reconstructed image from the
original image.
"""
imshow(image - filled, 'holes')
# plt.title('holes')
"""
.. image:: PLOT2RST.current_figure
Alternatively, we can find bright spots in an image using morphological
reconstruction by dilation. Dilation is the inverse of erosion and expands the
*maximal* values of the seed image until it encounters a mask image. Since this
is an inverse operation, we initialize the seed image to the minimum image
intensity instead of the maximum. The remainder of the process is the same.
"""
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
rec = reconstruction(seed, mask, method='dilation')
imshow(image - rec, 'peaks')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
MechCoder/scikit-garden | skgarden/tests/test_forest.py | 1 | 2169 | import numpy as np
from functools import partial
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from skgarden import RandomForestRegressor
from skgarden import ExtraTreesRegressor
def check_variance_toy_data(Regressor):
# Split into [2, 3, 4] and [100, 103, 106]
X = [[2.0, 1.], [3.0, 1.0], [4., 1.0],
[109.0, 1.0], [110.0, 1.0], [111., 1.]]
y = [2, 3, 4, 100, 103, 106]
reg = Regressor(max_depth=1, random_state=1)
reg.fit(X, y)
pred, var = reg.predict(X, return_std=True)
assert_array_equal(pred, [3, 3, 3, 103, 103, 103])
assert_array_almost_equal(
var, np.sqrt([0.666667, 0.666667, 0.666667, 6.0, 6.0, 6.0]))
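# Clarifying note (added, not part of the original test): with max_depth=1 the
# tree places {2, 3, 4} and {100, 103, 106} in separate leaves, so the
# predictions are the leaf means (3 and 103) and the returned uncertainties are
# the leaf population standard deviations, sqrt(2/3) and sqrt(6).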
def test_variance_toy_data():
"""Test that `return_std` behaves expected on toy data."""
for Regressor in [partial(RandomForestRegressor, bootstrap=False),
ExtraTreesRegressor]:
yield check_variance_toy_data, Regressor
def check_variance_no_split(Regressor):
rng = check_random_state(0)
X = np.ones((1000, 1))
y = rng.normal(size=(1000,))
reg = Regressor(random_state=0, max_depth=3)
reg.fit(X, y)
pred, std = reg.predict(X, return_std=True)
assert_array_almost_equal([np.std(y)] * 1000, std)
assert_array_almost_equal([np.mean(y)] * 1000, pred)
def test_variance_no_split():
"""
    Test that `return_std` behaves as expected on a tree with one node.
    The decision tree should not produce a split because there is no
    information gain, which enables us to verify the mean and
    standard deviation.
"""
for Regressor in [partial(RandomForestRegressor, bootstrap=False),
ExtraTreesRegressor]:
yield check_variance_no_split, Regressor
def test_min_variance():
rng = check_random_state(0)
X = rng.normal(size=(1000, 1))
y = np.ones(1000)
rf = RandomForestRegressor(min_variance=0.1)
rf.fit(X, y)
mean, std = rf.predict(X, return_std=True)
assert_array_almost_equal(mean, y)
assert_array_almost_equal(std, np.sqrt(0.1*np.ones(1000)))
| bsd-3-clause |
lenovor/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
tkarna/cofs | examples/columbia_plume/plot_elevation_ts.py | 2 | 3162 | """
Plots elevation time series
"""
import h5py
from netCDF4 import Dataset
from thetis.timezone import *
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
timezone = FixedTimeZone(-8, 'PST')
init_date = datetime.datetime(2006, 5, 10, tzinfo=timezone)
epoch = datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)
def simtime_to_datetime(simtime, init_date):
return np.array([init_date + datetime.timedelta(seconds=float(t)) for t in simtime])
def simtime_to_epoch(simtime, init_date):
offset = (init_date - epoch).total_seconds()
return simtime + offset
def epoch_to_datetime(time):
if isinstance(time, np.ndarray):
return np.array([epoch + datetime.timedelta(seconds=float(t)) for t in time])
return epoch + datetime.timedelta(seconds=float(time))
def read_netcdf(fn):
d = Dataset(fn)
assert 'time' in d.variables.keys(), 'netCDF file does not contain time variable'
out = OrderedDict()
# assuming epoch time
out['time'] = d['time'][:]
for k in d.variables.keys():
if k == 'time':
continue
out[k] = d[k][:]
return out
def read_hdf5(fn):
    d = h5py.File(fn, 'r')
assert 'time' in d, 'hdf5 file does not contain time variable'
out = OrderedDict()
# assuming simulation time
time = simtime_to_epoch(d['time'][:], init_date)
out['time'] = time
for k in d.keys():
if k == 'time':
continue
out[k] = d[k][:]
return out
def make_plot(data):
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
t_min = np.finfo('d').max
t_max = np.finfo('d').min
for tag in data:
d = data[tag]
time = d['time']
vals = d[list(d.keys())[1]]
datetime_arr = epoch_to_datetime(time)
t_min = min(t_min, time[0])
t_max = max(t_max, time[-1])
ax.plot(datetime_arr, vals, label=tag, alpha=0.8)
ax.set_ylabel('Elevation [m]')
fig.autofmt_xdate()
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.))
date_str = '_'.join([epoch_to_datetime(t).strftime('%Y-%m-%d') for t in [t_min, t_max]])
imgfn = 'ts_cmop_elev_tpoin_{:}.png'.format(date_str)
print('Saving {:}'.format(imgfn))
fig.savefig(imgfn, dpi=200, bbox_inches='tight')
def process(file_list):
files = [f for f in file_list if f[-1] != ':']
tags = [f[:-1] for f in file_list if f[-1] == ':']
if len(tags) == 0:
tags = files
assert len(tags) == len(files)
data = OrderedDict()
for f, t in zip(files, tags):
if f.endswith('.hdf5'):
d = read_hdf5(f)
elif f.endswith('.nc'):
d = read_netcdf(f)
else:
raise IOError('Unknown file format {:}'.format(f))
data[t] = d
make_plot(data)
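# Usage sketch (added; the file names below are hypothetical): optional labels
# are passed as extra arguments ending in ':' and are paired with the files in
# order, e.g.
#   python plot_elevation_ts.py run1: diagnostic_timeseries.hdf5 obs: station.nc
# plots the hdf5 series labelled 'run1' and the netCDF series labelled 'obs'.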
def get_argparser():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(dest='file_list', type=str, nargs='+',
                        help='hdf5 or netcdf file to plot')
    return parser
def parse_options():
    parser = get_argparser()
    args = parser.parse_args()
    process(args.file_list)
if __name__ == '__main__':
parse_options()
| mit |
maanitm/MadMobile | testing/distanceDetection.py | 1 | 1062 | # import the necessary packages
import numpy as np
import argparse
import glob
import cv2
import imutils
from matplotlib import pyplot as plt
cameraL = cv2.VideoCapture(1)
cameraR = cv2.VideoCapture(2)
print(cameraL.get(3))
print(cameraL.get(4))
print(cameraR.get(3))
print(cameraR.get(4))
while True:
(grabbedL, camL) = cameraL.read()
(grabbedR, camR) = cameraR.read()
# camL = imutils.resize(camL, width=1280, height=960)
# camR = imutils.resize(camR, width=1280, height=960)
imgL = cv2.cvtColor(camL, cv2.COLOR_BGR2GRAY)
# camR = camR[120:840, 0:1280]
imgR = cv2.cvtColor(camR, cv2.COLOR_BGR2GRAY)
#
# height, width = imgL.shape[:2]
# print(width)
# print(height)
# height, width = imgR.shape[:2]
# print(width)
# print(height)
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(imgL,imgR)
plt.imshow(disparity,'gray')
plt.show()
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cameraL.release()
cameraR.release()
cv2.destroyAllWindows()
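# Hedged sketch (added; not in the original script): converting the block-matcher
# disparity to metric depth needs the rig's focal length in pixels and baseline
# in metres, neither of which is defined above, so the values here are
# placeholder assumptions. StereoBM returns fixed-point disparity scaled by 16.
#
# focal_length_px = 700.0                    # assumed calibration value
# baseline_m = 0.06                          # assumed camera separation
# disp = disparity.astype(np.float32) / 16.0
# valid = disp > 0
# depth_m = np.zeros_like(disp)
# depth_m[valid] = focal_length_px * baseline_m / disp[valid]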
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardSoftShear/Shear_Zone_Length/SZ_h_1e-3/Normalized_Shear_Stress_Plot.py | 24 | 3505 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain, shear_stress/normal_stress, '-r', label='Analytical Solution', linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain, shear_stress/normal_stress, '-k', label='Numerical Solution', linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Normalized_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |