repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
SANDAG/urbansim | urbansim/utils/tests/test_yamlio.py | 5 | 3287 | import os
import tempfile
from StringIO import StringIO
import numpy as np
import pandas as pd
import pytest
import yaml
from pandas.util import testing as pdt
from .. import yamlio
@pytest.fixture
def test_cfg():
return {
'name': 'test',
'ytransform': 'xyz',
'unordered': 'abc'
}
@pytest.fixture
def expected_yaml():
return (
'name: test{linesep}{linesep}'
'ytransform: xyz{linesep}{linesep}'
'unordered: abc{linesep}').format(linesep=os.linesep)
@pytest.fixture
def test_file(request):
name = tempfile.NamedTemporaryFile(suffix='.yaml').name
def cleanup():
if os.path.exists(name):
os.remove(name)
request.addfinalizer(cleanup)
return name
def test_ordered_yaml(test_cfg, expected_yaml):
test_yaml = yamlio.ordered_yaml(test_cfg)
assert test_yaml == expected_yaml
def test_convert_to_yaml_str(test_cfg, expected_yaml):
test_yaml = yamlio.convert_to_yaml(test_cfg, str_or_buffer=None)
assert test_yaml == expected_yaml
def test_convert_to_yaml_file(test_cfg, expected_yaml, test_file):
yamlio.convert_to_yaml(test_cfg, test_file)
with open(test_file) as f:
assert f.read() == expected_yaml
def test_convert_to_yaml_buffer(test_cfg, expected_yaml):
test_buffer = StringIO()
yamlio.convert_to_yaml(test_cfg, test_buffer)
assert test_buffer.getvalue() == expected_yaml
class Test_yaml_to_dict(object):
@classmethod
def setup_class(cls):
cls.yaml_str = """
a:
x: 1
y: 2
z: 3
b:
x: 3
y: 4
z: 5
"""
cls.expect_dict = {
'a': {'x': 1, 'y': 2, 'z': 3},
'b': {'x': 3, 'y': 4, 'z': 5}}
def test_str(self):
assert yamlio.yaml_to_dict(yaml_str=self.yaml_str) == self.expect_dict
def test_file(self, test_file):
with open(test_file, 'w') as f:
f.write(self.yaml_str)
assert yamlio.yaml_to_dict(str_or_buffer=test_file) == self.expect_dict
def test_buffer(self):
buff = StringIO(self.yaml_str)
buff.seek(0)
assert yamlio.yaml_to_dict(str_or_buffer=buff) == self.expect_dict
def test_raises(self):
with pytest.raises(ValueError):
yamlio.yaml_to_dict()
def test_series_to_yaml_safe_int_index():
s = pd.Series(np.arange(100, 103), index=np.arange(3))
d = yamlio.series_to_yaml_safe(s)
assert d == {0: 100, 1: 101, 2: 102}
y = yaml.dump(d, default_flow_style=False)
pdt.assert_series_equal(pd.Series(yaml.load(y)), s)
def test_series_to_yaml_safe_str_index():
s = pd.Series(
np.array(['a', 'b', 'c']), index=np.array(['x', 'y', 'z']))
d = yamlio.series_to_yaml_safe(s)
assert d == {'x': 'a', 'y': 'b', 'z': 'c'}
y = yaml.dump(d, default_flow_style=False)
pdt.assert_series_equal(pd.Series(yaml.load(y)), s)
def test_frame_to_yaml_safe():
df = pd.DataFrame(
{'col1': np.array([100, 200, 300]),
'col2': np.array(['a', 'b', 'c'])},
index=np.arange(3))
d = yamlio.frame_to_yaml_safe(df)
assert d == {'col1': {0: 100, 1: 200, 2: 300},
'col2': {0: 'a', 1: 'b', 2: 'c'}}
y = yaml.dump(d, default_flow_style=False)
pdt.assert_frame_equal(pd.DataFrame(yaml.load(y)), df)
| bsd-3-clause |
quheng/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimensions (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
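# --- Added illustration (editorial, not part of the original example) ---
# The docstring above claims that cosine distance ignores a rescaling of the
# data while cityblock/euclidean do not. A quick check on a toy vector; the
# underscore-prefixed names are invented here and unused by the example:
_v = np.array([[1.0, 2.0, 3.0]])
_d_cos = pairwise_distances(_v, 3 * _v, metric="cosine")     # ~0.0: scale-invariant
_d_l1 = pairwise_distances(_v, 3 * _v, metric="cityblock")   # grows with the scale
_d_l2 = pairwise_distances(_v, 3 * _v, metric="euclidean")   # grows with the scale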
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
mugizico/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimensions (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
ishanic/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is found
very often with more complex datasets: the training score is very high at the
beginning and decreases, while the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
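# --- Added usage sketch (editorial, not part of the original example) ---
# plot_learning_curve also accepts a plain integer cv (the number of folds, per
# its docstring). This extra call is purely illustrative and reuses the digits
# data loaded above; remove it if you only want the two original figures.
title = "Learning Curves (Naive Bayes, 5-fold CV)"
plot_learning_curve(GaussianNB(), title, X, y, ylim=(0.7, 1.01), cv=5, n_jobs=1)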
plt.show()
| bsd-3-clause |
smrjan/seldon-server | docker/examples/US_stocks_fund/create_pipeline.py | 2 | 17985 | import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn import preprocessing
from seldon.tensorflow_wrapper import TensorFlowWrapper
from sklearn.pipeline import Pipeline
import seldon.pipeline.util as sutl
import argparse
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
df_Xy = pd.read_csv('home/seldon/data/indicators_nan_replaced.csv')
#df_Xy = pd.read_csv('home/seldon/data/indicators_nan_replacedv2.csv')
#df_Xy = pd.read_csv('home/seldon/data/indicators_nan_replacedv3.csv')
logger.info('tf version: %s ' % tf.__version__)
def get_data(split_train_test=False):
cols_tokeep = df_Xy.columns.tolist()
logger.debug('columns to keep: ')
logger.debug(cols_tokeep)
if 'company_id' in cols_tokeep:
cols_tokeep.remove('company_id')
Xy = df_Xy.as_matrix(columns=cols_tokeep)
Xy_shuffled = np.random.permutation(Xy)
(means,stds) = (np.mean(Xy_shuffled[:,:-1],axis=0).reshape((1,Xy_shuffled[:,:-1].shape[1])),
np.std(Xy_shuffled[:,:-1],axis=0).reshape((1,Xy_shuffled[:,:-1].shape[1])))
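    # Added note (editorial): the loop below forces mean 0 / std 1 for every
    # odd-numbered feature column, so those columns pass through the later
    # (x - mean) / std scaling unchanged; presumably they are not meant to be
    # rescaled.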
for i,v in enumerate(means[0]):
if i%2!=0:
means[0,i]=0
stds[0,i]=1
if split_train_test:
#split train-test
split_ratio = int(0.7*(len(Xy)))
Xy_train = Xy_shuffled[:split_ratio,:]
Xy_test = Xy_shuffled[split_ratio:,:]
dataset = {'train':Xy_train, 'test':Xy_test, 'means':means, 'stds':stds}
else:
#no splitting
dataset = {'train':Xy_shuffled, 'test':Xy_shuffled, 'means':means, 'stds':stds}
return dataset
def get_data_v3(split_train_test=False):
cols_tokeep = df_Xy.columns.tolist()
logger.debug('columns to keep')
logger.debug(cols_tokeep)
if 'company_id' in cols_tokeep:
cols_tokeep.remove('company_id')
Xy = df_Xy.as_matrix(columns=cols_tokeep)
Xy_shuffled = np.random.permutation(Xy)
(means,stds) = (np.mean(Xy_shuffled[:,:-1],axis=0).reshape((1,Xy_shuffled[:,:-1].shape[1])),
np.std(Xy_shuffled[:,:-1],axis=0).reshape((1,Xy_shuffled[:,:-1].shape[1])))
if split_train_test:
#split train-test
split_ratio = int(0.7*(len(Xy)))
Xy_train = Xy_shuffled[:split_ratio,:]
Xy_test = Xy_shuffled[split_ratio:,:]
dataset = {'train':Xy_train, 'test':Xy_test, 'means':means, 'stds':stds}
else:
#no splitting
dataset = {'train':Xy_shuffled, 'test':Xy_shuffled, 'means':means, 'stds':stds}
return dataset
def fill_feed_dict_train(in_pl,
y_pl,
dataset,
iterator,
batch_size=128):
train = dataset['train']
if batch_size=='all':
feed_dict_train = {in_pl : train[:,:-1],
y_pl : train[:,-1].reshape((len(train),1))}
else:
nb_batches = int(dataset['train'].shape[0]/batch_size)
j = iterator % nb_batches
feed_dict_train = {in_pl : train[j*batch_size:(j+1)*batch_size,:-1],
y_pl : train[j*batch_size:(j+1)*batch_size,-1].reshape((batch_size,1))}
return feed_dict_train
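# --- Added sketch (editorial, not from the original pipeline): how the
# mini-batching above cycles through the training array. The toy dict and the
# string placeholder keys below are invented purely for illustration.
_toy = {'train': np.arange(30).reshape(10, 3)}   # 10 rows, last column = label
_fd = fill_feed_dict_train('x', 'y', _toy, iterator=3, batch_size=4)
# nb_batches = int(10 / 4) = 2, so iteration 3 reuses batch 3 % 2 = 1,
# i.e. _fd['x'] holds rows 4:8 without the label column and _fd['y'] the labels.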
def fill_feed_dict_test(in_pl,
y_pl,
dataset):
test = dataset['test']
feed_dict_test = {in_pl : test[:,:-1],
y_pl : test[:,-1].reshape((len(test),1))}
return feed_dict_test
dataset = get_data()
#dataset = get_data_v3()
# model v1
def create_pipeline_v1(load=None):
nb_features = 58
nb_hidden1 = 116
nb_hidden2 = 29
batch_size = 64
nb_iter = 30001
lamb = 0.0001
in_pl = tf.placeholder(dtype=tf.float32,
shape=(None,nb_features),
name='input_placeholder')
means = tf.constant(dataset['means'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_means')
stds = tf.constant(dataset['stds'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_stds_placeholder')
means_tiled = tf.tile(means,[tf.shape(in_pl)[0],1])
stds_tiled = tf.tile(stds,[tf.shape(in_pl)[0],1])
#scaled inputs
inp = (in_pl - means_tiled)/(stds_tiled+1e-10)
y_pl = tf.placeholder(dtype=tf.float32,
shape=(None,1),
name='target_placeholder')
W1 = tf.Variable(tf.truncated_normal([nb_features,nb_hidden1]),
dtype=tf.float32,
name='first_layer_weights')
W1_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W1))
b1 = tf.Variable(tf.zeros(shape=[nb_hidden1]))
#first hidden layer
h1 = tf.nn.relu(tf.matmul(inp,W1) + b1,
name='first_hidden_layer')
W2 = tf.Variable(tf.truncated_normal([nb_hidden1,nb_hidden2]),
dtype=tf.float32,
name='second_layer_weights')
W2_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W2))
b2 = tf.Variable(tf.zeros(shape=[nb_hidden2]))
#second hidden layer
h2 = tf.sigmoid(tf.matmul(h1,W2) + b2,
name='second_hidden_layer')
W3 = tf.Variable(tf.truncated_normal([nb_hidden2,1]),
dtype=tf.float32,
name='last_layer_weights')
W3_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W3))
b3 = tf.Variable(tf.zeros(shape=[1]))
#out layer
out = tf.sigmoid(tf.matmul(h2,W3) + b3,
name='output_layer')
proba = tf.squeeze(tf.pack([1-out,out],2),
squeeze_dims=[1])
L2reg = lamb*(W1_L2reg + W2_L2reg + W3_L2reg)
cross_entropy = -(1/float(2))*tf.reduce_mean(y_pl * tf.log(out+1e-10) + (1-y_pl) * tf.log(1-out+1e-10),
name='cost_function')
cost = cross_entropy + L2reg
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.Session()
logger.info('Training model...')
logger.info('model version: %i' % 1)
sess.run(init)
for i in range(nb_iter):
if i % 1000 == 0:
logger.info('iteration %i of %i' % (i,nb_iter))
feed_dict_train = fill_feed_dict_train(in_pl,
y_pl,
dataset,
i,
batch_size=batch_size)
(_,
W3_value,
cost_value,
out_value) = sess.run([train_step,
W3,
cost,
out],
feed_dict=feed_dict_train)
if i % 10000 == 0:
# feed_dict_test = fill_feed_dict_test(in_pl,
# y_pl,
# dataset)
inp_values,proba_values = sess.run([inp,proba],feed_dict=feed_dict_train)
logger.debug('scaled inputs:')
logger.debug(inp_values)
logger.debug('probabilities:')
logger.debug(proba_values)
logger.debug('proba out shape:')
logger.debug(proba_values.shape)
logger.debug('cost: %f' % cost_value)
tfw = TensorFlowWrapper(sess,tf_input=in_pl,tf_output=proba,
target="y",target_readable="class",excluded=['class'])
return Pipeline([('deep_classifier',tfw)])
#model v2
def create_pipeline_v2(load=None):
nb_features = 58
nb_hidden1 = 400
nb_hidden2 = 200
nb_hidden3 = 100
batch_size = 64
nb_iter = 30001
lamb = 0.0001
in_pl = tf.placeholder(dtype=tf.float32,
shape=(None,nb_features),
name='input_placeholder')
means = tf.constant(dataset['means'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_means')
stds = tf.constant(dataset['stds'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_stds_placeholder')
means_tiled = tf.tile(means,[tf.shape(in_pl)[0],1])
stds_tiled = tf.tile(stds,[tf.shape(in_pl)[0],1])
#scaled inputs
inp = (in_pl - means_tiled)/(stds_tiled+1e-10)
y_pl = tf.placeholder(dtype=tf.float32,
shape=(None,1),
name='target_placeholder')
#first hidden layer
W1 = tf.Variable(tf.truncated_normal([nb_features,nb_hidden1]),
dtype=tf.float32,
name='first_layer_weights')
W1_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W1))
b1 = tf.Variable(tf.zeros(shape=[nb_hidden1]))
h1 = tf.sigmoid(tf.matmul(inp,W1) + b1,
name='first_hidden_layer')
#second hidden layer
W2 = tf.Variable(tf.truncated_normal([nb_hidden1,nb_hidden2]),
dtype=tf.float32,
name='second_layer_weights')
W2_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W2))
b2 = tf.Variable(tf.zeros(shape=[nb_hidden2]))
h2 = tf.sigmoid(tf.matmul(h1,W2) + b2,
name='second_hidden_layer')
#third hidden layer
W3 = tf.Variable(tf.truncated_normal([nb_hidden2,nb_hidden3]),
dtype=tf.float32,
name='third_layer_weights')
W3_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W3))
b3 = tf.Variable(tf.zeros(shape=[nb_hidden3]))
h3 = tf.sigmoid(tf.matmul(h2,W3) + b3,
name='third_hidden_layer')
#out layer
W4 = tf.Variable(tf.truncated_normal([nb_hidden3,1]),
dtype=tf.float32,
name='last_layer_weights')
W4_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W4))
b4 = tf.Variable(tf.zeros(shape=[1]))
out = tf.sigmoid(tf.matmul(h3,W4) + b4,
name='output_layer')
proba = tf.squeeze(tf.pack([1-out,out],2),
squeeze_dims=[1])
L2reg = lamb*(W1_L2reg + W2_L2reg + W3_L2reg + W4_L2reg)
cross_entropy = -(1/float(2))*tf.reduce_mean(y_pl * tf.log(out+1e-10) + (1-y_pl) * tf.log(1-out+1e-10),
name='cost_function')
cost = cross_entropy + L2reg
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.Session()
logger.info('Training model...')
logger.info('model version: %i' % 2)
sess.run(init)
for i in range(nb_iter):
if i % 1000 == 0:
logger.info('iteration %i of %i' % (i,nb_iter))
feed_dict_train = fill_feed_dict_train(in_pl,
y_pl,
dataset,
i,
batch_size=batch_size)
(_,
W3_value,
cost_value,
out_value) = sess.run([train_step,
W3,
cost,
out],
feed_dict=feed_dict_train)
if i % 10000 == 0:
# feed_dict_test = fill_feed_dict_test(in_pl,
# y_pl,
# dataset)
inp_values,proba_values = sess.run([inp,proba],feed_dict=feed_dict_train)
logger.debug('scaled inputs')
logger.debug(inp_values)
logger.debug('probabilities')
logger.debug(proba_values)
logger.debug('proba out shape')
logger.debug(proba_values.shape)
logger.debug('cost')
logger.debug(cost_value)
tfw = TensorFlowWrapper(sess,tf_input=in_pl,tf_output=proba,
target="y",target_readable="class",excluded=['class'])
return Pipeline([('deep_classifier',tfw)])
# model v3
def create_pipeline_v3(load=None):
nb_features = 29
nb_hidden1 = 400
nb_hidden2 = 200
nb_hidden3 = 100
batch_size = 64
nb_iter = 30001
lamb = 0.0001
in_pl = tf.placeholder(dtype=tf.float32,
shape=(None,nb_features),
name='input_placeholder')
means = tf.constant(dataset['means'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_means')
stds = tf.constant(dataset['stds'],
dtype=tf.float32,
shape=(1,nb_features),
name='features_stds_placeholder')
means_tiled = tf.tile(means,[tf.shape(in_pl)[0],1])
stds_tiled = tf.tile(stds,[tf.shape(in_pl)[0],1])
#scaled inputs
inp = (in_pl - means_tiled)/(stds_tiled+1e-10)
y_pl = tf.placeholder(dtype=tf.float32,
shape=(None,1),
name='target_placeholder')
#first hidden layer
W1 = tf.Variable(tf.truncated_normal([nb_features,nb_hidden1]),
dtype=tf.float32,
name='first_layer_weights')
W1_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W1))
b1 = tf.Variable(tf.zeros(shape=[nb_hidden1]))
h1 = tf.sigmoid(tf.matmul(inp,W1) + b1,
name='first_hidden_layer')
#second hidden layer
W2 = tf.Variable(tf.truncated_normal([nb_hidden1,nb_hidden2]),
dtype=tf.float32,
name='second_layer_weights')
W2_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W2))
b2 = tf.Variable(tf.zeros(shape=[nb_hidden2]))
h2 = tf.sigmoid(tf.matmul(h1,W2) + b2,
name='second_hidden_layer')
#third hidden layer
W3 = tf.Variable(tf.truncated_normal([nb_hidden2,nb_hidden3]),
dtype=tf.float32,
name='third_layer_weights')
W3_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W3))
b3 = tf.Variable(tf.zeros(shape=[nb_hidden3]))
h3 = tf.sigmoid(tf.matmul(h2,W3) + b3,
name='third_hidden_layer')
#out layer
W4 = tf.Variable(tf.truncated_normal([nb_hidden3,1]),
dtype=tf.float32,
name='last_layer_weights')
W4_L2reg = (1/2*batch_size)*tf.reduce_sum(tf.square(W4))
b4 = tf.Variable(tf.zeros(shape=[1]))
out = tf.sigmoid(tf.matmul(h3,W4) + b4,
name='output_layer')
proba = tf.squeeze(tf.pack([1-out,out],2),
squeeze_dims=[1])
L2reg = lamb*(W1_L2reg + W2_L2reg + W3_L2reg + W4_L2reg)
cross_entropy = -(1/float(2))*tf.reduce_mean(y_pl * tf.log(out+1e-10) + (1-y_pl) * tf.log(1-out+1e-10),
name='cost_function')
cost = cross_entropy + L2reg
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.Session()
logger.info('Training model...')
logger.info('model version %i' % 3)
sess.run(init)
for i in range(nb_iter):
if i % 1000 == 0:
logger.info('iteration %i of %i' % (i,nb_iter))
feed_dict_train = fill_feed_dict_train(in_pl,
y_pl,
dataset,
i,
batch_size=batch_size)
(_,
W3_value,
cost_value,
out_value) = sess.run([train_step,
W3,
cost,
out],
feed_dict=feed_dict_train)
if i % 10000 == 0:
# feed_dict_test = fill_feed_dict_test(in_pl,
# y_pl,
# dataset)
inp_values,proba_values = sess.run([inp,proba],feed_dict=feed_dict_train)
logger.debug('scaled inputs')
logger.debug(inp_values)
logger.debug('probabilities')
logger.debug(proba_values)
logger.debug('proba out shape')
logger.debug(proba_values.shape)
logger.debug('cost')
logger.debug(cost_value)
tfw = TensorFlowWrapper(sess,tf_input=in_pl,tf_output=proba,
target="y",target_readable="class",excluded=['class'])
return Pipeline([('deep_classifier',tfw)])
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='pipeline_example')
parser.add_argument('-m','--model', help='model output folder', required=True)
parser.add_argument('-l','--load',help='Load pretrained model from file')
args = parser.parse_args()
p = create_pipeline_v1(args.load)
# p = create_pipeline_v2(args.load)
# p = create_pipeline_v3(args.load)
pw = sutl.PipelineWrapper()
pw.save_pipeline(p,args.model)
logger.info('tf version: %s' % tf.__version__)
logger.info('pipeline saved in %s' % args.model)
| apache-2.0 |
sinall/ShiPanE-Python-SDK | strategyease_sdk/client.py | 1 | 11828 | # -*- coding: utf-8 -*-
import datetime
import re
from enum import Enum
import lxml.html
import pandas as pd
import requests
import six
import tushare as ts
from lxml import etree
from requests import Request
from requests.auth import HTTPBasicAuth
from six import StringIO
from six.moves.urllib.parse import urlencode
class MediaType(Enum):
DEFAULT = 'application/json'
JOIN_QUANT = 'application/vnd.joinquant+json'
class ConnectionMethod(Enum):
DIRECT = 'DIRECT'
PROXY = 'PROXY'
class Client(object):
VERSION = 'v1.0'
KEY_REGEX = r'key=([^&]*)'
def __init__(self, logger=None, **kwargs):
if logger is not None:
self._logger = logger
else:
import logging
self._logger = logging.getLogger(__name__)
self._connection_method = ConnectionMethod[kwargs.pop('connection_method', 'DIRECT')]
if self._connection_method is ConnectionMethod.DIRECT:
self._host = kwargs.pop('host', 'localhost')
self._port = kwargs.pop('port', 8888)
else:
self._proxy_base_url = kwargs.pop('proxy_base_url')
self._proxy_username = kwargs.pop('proxy_username')
self._proxy_password = kwargs.pop('proxy_password')
self._instance_id = kwargs.pop('instance_id')
self._base_url = self.__create_base_url()
self._key = kwargs.pop('key', '')
self._client = kwargs.pop('client', '')
self._timeout = kwargs.pop('timeout', (5.0, 10.0))
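    # --- Added usage sketch (editorial, not part of the SDK; all values below
    # are invented) ---
    # client = Client(host='localhost', port=8888, key='my-key', client='account-1')
    # or, through the vendor's proxy:
    # client = Client(connection_method='PROXY', proxy_base_url='https://proxy.example',
    #                 proxy_username='user', proxy_password='pass', instance_id='abc123')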
@property
def host(self):
return self._host
@host.setter
def host(self, value):
self._host = value
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
def get_statuses(self, timeout=None):
request = Request('GET', self.__create_url(None, 'statuses'))
response = self.__send_request(request, timeout)
return response.json()
def get_account(self, client=None, timeout=None):
request = Request('GET', self.__create_url(client, 'accounts'))
response = self.__send_request(request, timeout)
return response.json()
# You should use get_portfolio
def get_positions(self, client=None, media_type=MediaType.DEFAULT, timeout=None):
request = Request('GET', self.__create_url(client, 'positions'))
request.headers['Accept'] = media_type.value
response = self.__send_request(request, timeout)
json = response.json()
if media_type == MediaType.DEFAULT:
sub_accounts = pd.DataFrame(json['subAccounts']).T
positions = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
portfolio = {'sub_accounts': sub_accounts, 'positions': positions}
return portfolio
return json
def get_portfolio(self, client=None, media_type=MediaType.DEFAULT, timeout=None):
request = Request('GET', self.__create_url(client, 'portfolios'))
request.headers['Accept'] = media_type.value
response = self.__send_request(request, timeout)
json = response.json()
if media_type == MediaType.DEFAULT:
sub_accounts = pd.DataFrame(json['subAccounts']).T
positions = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
portfolio = {'sub_accounts': sub_accounts, 'positions': positions}
return portfolio
return json
def get_orders(self, client=None, status="", timeout=None):
request = Request('GET', self.__create_url(client, 'orders', status=status))
response = self.__send_request(request, timeout)
json = response.json()
df = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
return df
def buy(self, client=None, timeout=None, **kwargs):
kwargs['action'] = 'BUY'
return self.__execute(client, timeout, **kwargs)
def sell(self, client=None, timeout=None, **kwargs):
kwargs['action'] = 'SELL'
return self.__execute(client, timeout, **kwargs)
def ipo(self, client=None, timeout=None, **kwargs):
kwargs['action'] = 'IPO'
return self.__execute(client, timeout, **kwargs)
def execute(self, client=None, timeout=None, **kwargs):
return self.__execute(client, timeout, **kwargs)
def cancel(self, client=None, order_id=None, symbol=None, timeout=None):
request = Request('DELETE', self.__create_order_url(client, order_id, symbol=symbol))
self.__send_request(request, timeout)
def cancel_all(self, client=None, timeout=None):
request = Request('DELETE', self.__create_order_url(client))
self.__send_request(request, timeout)
def query(self, client=None, type=None, navigation=None, timeout=None):
request = Request('GET', self.__create_url(client, 'reports', type=type, navigation=navigation))
response = self.__send_request(request, timeout)
json = response.json()
df = pd.DataFrame(json['dataTable']['rows'], columns=json['dataTable']['columns'])
return df
def query_new_stocks(self):
return self.__query_new_stocks()
def query_convertible_bonds(self):
return self.__query_convertible_bonds()
def purchase_new_stocks(self, client=None, timeout=None):
today = datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')
df = self.query_new_stocks()
df = df[(df.ipo_date == today)]
self._logger.info('There are [{}] new stocks available for subscription today'.format(len(df)))
for index, row in df.iterrows():
try:
order = {
'symbol': row['xcode'],
'price': row['price'],
'amountProportion': 'ALL'
}
self._logger.info('Subscribing to new stock: {}'.format(order))
self.ipo(client, timeout, **order)
except Exception as e:
self._logger.error(
'Client [{}] failed to subscribe to new stock [{}({})]\n{}'.format((client or self._client), row['name'], row['code'], e))
def purchase_convertible_bonds(self, client=None, timeout=None):
today = datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d')
df = self.query_convertible_bonds()
df = df[(df.ipo_date == today)]
self._logger.info('There are [{}] convertible bonds available for subscription today'.format(len(df)))
for index, row in df.iterrows():
try:
order = {
'symbol': row['xcode'],
'price': 100,
'amountProportion': 'ALL'
}
self._logger.info('Subscribing to convertible bond: {}'.format(order))
self.buy(client, timeout, **order)
except Exception as e:
self._logger.error(
'Client [{}] failed to subscribe to convertible bond [{}({})]\n{}'.format((client or self._client), row['bname'], row['xcode'], e))
def create_adjustment(self, client=None, request_json=None, timeout=None):
request = Request('POST', self.__create_url(client, 'adjustments'), json=request_json)
request.headers['Content-Type'] = MediaType.JOIN_QUANT.value
response = self.__send_request(request, timeout)
json = response.json()
return json
def start_clients(self, timeout=None):
self.__change_clients_status('LOGGED')
def shutdown_clients(self, timeout=None):
self.__change_clients_status('STOPPED')
def __execute(self, client=None, timeout=None, **kwargs):
if not kwargs.get('type'):
kwargs['type'] = 'LIMIT'
request = Request('POST', self.__create_order_url(client), json=kwargs)
response = self.__send_request(request)
return response.json()
def __change_clients_status(self, status, timeout=None):
request = Request('PATCH', self.__create_url(None, 'clients'), json={
'status': status
})
self.__send_request(request, timeout)
def __query_new_stocks(self):
DATA_URL = 'http://vip.stock.finance.sina.com.cn/corp/view/vRPD_NewStockIssue.php?page=1&cngem=0&orderBy=NetDate&orderType=desc'
html = lxml.html.parse(DATA_URL)
res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
if six.PY2:
sarr = [etree.tostring(node) for node in res]
else:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
sarr = ''.join(sarr)
sarr = sarr.replace('<font color="red">*</font>', '')
sarr = '<table>%s</table>' % sarr
df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
df = df.select(lambda x: x in [0, 1, 2, 3, 7], axis=1)
df.columns = ['code', 'xcode', 'name', 'ipo_date', 'price']
df['code'] = df['code'].map(lambda x: str(x).zfill(6))
df['xcode'] = df['xcode'].map(lambda x: str(x).zfill(6))
return df
def __query_convertible_bonds(self):
df = ts.new_cbonds()
return df
def __create_order_url(self, client=None, order_id=None, **params):
return self.__create_url(client, 'orders', order_id, **params)
def __create_url(self, client, resource, resource_id=None, **params):
all_params = dict((k, v) for k, v in params.items() if v is not None)
all_params.update(client=(client or self._client))
all_params.update(key=(self._key or ''))
if resource_id is None:
path = '/{}'.format(resource)
else:
path = '/{}/{}'.format(resource, resource_id)
url = '{}/api/{}{}?{}'.format(self._base_url, self.VERSION, path, urlencode(all_params))
return url
def __create_base_url(self):
if self._connection_method is ConnectionMethod.DIRECT:
return 'http://{}:{}'.format(self._host, self._port)
else:
return self._proxy_base_url
def __send_request(self, request, timeout=None):
if self._connection_method is ConnectionMethod.PROXY:
request.auth = HTTPBasicAuth(self._proxy_username, self._proxy_password)
request.headers['X-Instance-ID'] = self._instance_id
prepared_request = request.prepare()
self.__log_request(prepared_request)
with requests.sessions.Session() as session:
response = session.send(prepared_request, timeout=(timeout or self._timeout))
self.__log_response(response)
response.raise_for_status()
return response
def __log_request(self, prepared_request):
url = self.__eliminate_privacy(prepared_request.path_url)
if prepared_request.body is None:
self._logger.info('Request:\n{} {}'.format(prepared_request.method, url))
else:
self._logger.info('Request:\n{} {}\n{}'.format(prepared_request.method, url, prepared_request.body))
def __log_response(self, response):
message = u'Response:\n{} {}\n{}'.format(response.status_code, response.reason, response.text)
if response.status_code == 200:
self._logger.info(message)
else:
self._logger.error(message)
@classmethod
def __eliminate_privacy(cls, url):
match = re.search(cls.KEY_REGEX, url)
if match is None:
return url
key = match.group(1)
masked_key = '*' * len(key)
url = re.sub(cls.KEY_REGEX, "key={}".format(masked_key), url)
return url
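# --- Added sketch (editorial, not part of strategyease_sdk): what the key
# masking above does. The URL below is invented purely for illustration.
_example_url = '/api/v1.0/orders?client=demo&key=secret123'
_masked = re.sub(Client.KEY_REGEX, 'key=' + '*' * len('secret123'), _example_url)
# _masked == '/api/v1.0/orders?client=demo&key=*********'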
| mit |
cmorgan/zipline | zipline/utils/data_source_tables_gen.py | 40 | 7380 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import getopt
import traceback
import numpy as np
import pandas as pd
import datetime
import logging
import tables
import gzip
import glob
import os
import random
import csv
import time
from six import print_
FORMAT = "%(asctime)-15s -8s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
OHLCTableDescription = {'sid': tables.StringCol(14, pos=2),
'dt': tables.Int64Col(pos=1),
'open': tables.Float64Col(dflt=np.NaN, pos=3),
'high': tables.Float64Col(dflt=np.NaN, pos=4),
'low': tables.Float64Col(dflt=np.NaN, pos=5),
'close': tables.Float64Col(dflt=np.NaN, pos=6),
"volume": tables.Int64Col(dflt=0, pos=7)}
def process_line(line):
dt = np.datetime64(line["dt"]).astype(np.int64)
sid = line["sid"]
open_p = float(line["open"])
high_p = float(line["high"])
low_p = float(line["low"])
close_p = float(line["close"])
volume = int(line["volume"])
return (dt, sid, open_p, high_p, low_p, close_p, volume)
def parse_csv(csv_reader):
previous_date = None
data = []
dtype = [('dt', 'int64'), ('sid', '|S14'), ('open', float),
('high', float), ('low', float), ('close', float),
('volume', int)]
for line in csv_reader:
row = process_line(line)
current_date = line["dt"][:10].replace("-", "")
if previous_date and previous_date != current_date:
rows = np.array(data, dtype=dtype).view(np.recarray)
yield current_date, rows
data = []
data.append(row)
previous_date = current_date
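# Added note (editorial): parse_csv buffers one calendar day of minute bars at a
# time and only yields the accumulated recarray once it sees a row for the next
# day, so callers receive the rows grouped per trading day.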
def merge_all_files_into_pytables(file_dir, file_out):
"""
process each file into pytables
"""
start = None
start = datetime.datetime.now()
out_h5 = tables.openFile(file_out,
mode="w",
title="bars",
filters=tables.Filters(complevel=9,
complib='zlib'))
table = None
for file_in in glob.glob(file_dir + "/*.gz"):
gzip_file = gzip.open(file_in)
expected_header = ["dt", "sid", "open", "high", "low", "close",
"volume"]
csv_reader = csv.DictReader(gzip_file)
header = csv_reader.fieldnames
if header != expected_header:
logging.warn("expected header %s\n" % (expected_header))
logging.warn("header_found %s" % (header))
return
for current_date, rows in parse_csv(csv_reader):
table = out_h5.createTable("/TD", "date_" + current_date,
OHLCTableDescription,
expectedrows=len(rows),
createparents=True)
table.append(rows)
table.flush()
if table is not None:
table.flush()
end = datetime.datetime.now()
diff = (end - start).seconds
logging.debug("finished it took %d." % (diff))
def create_fake_csv(file_in):
fields = ["dt", "sid", "open", "high", "low", "close", "volume"]
gzip_file = gzip.open(file_in, "w")
dict_writer = csv.DictWriter(gzip_file, fieldnames=fields)
current_dt = datetime.date.today() - datetime.timedelta(days=2)
current_dt = pd.Timestamp(current_dt).replace(hour=9)
current_dt = current_dt.replace(minute=30)
end_time = pd.Timestamp(datetime.date.today())
end_time = end_time.replace(hour=16)
last_price = 10.0
while current_dt < end_time:
row = {}
row["dt"] = current_dt
row["sid"] = "test"
last_price += random.randint(-20, 100) / 10000.0
row["close"] = last_price
row["open"] = last_price - 0.01
row["low"] = last_price - 0.02
row["high"] = last_price + 0.02
row["volume"] = random.randint(10, 1000) * 10
dict_writer.writerow(row)
current_dt += datetime.timedelta(minutes=1)
if current_dt.hour > 16:
current_dt += datetime.timedelta(days=1)
current_dt = current_dt.replace(hour=9)
current_dt = current_dt.replace(minute=30)
gzip_file.close()
def main(argv=None):
"""
This script cleans minute bars into a pytables file.
data_source_tables_gen.py
[--tz_in] sets the time zone of the data (the only reasonably fast way
to do this is time.tzset())
[--dir_in] iterates through the provided directory of gzipped csv files
of the form:
dt, sid, open, high, low, close, volume
2012-01-01T12:30:30,1234HT,1, 2,3,4.0
[--fake_csv] creates a fake sample csv to iterate through
[--file_out] determines output file
"""
if argv is None:
argv = sys.argv
try:
dir_in = None
file_out = "./all.h5"
fake_csv = None
try:
opts, args = getopt.getopt(argv[1:], "hdft",
["help",
"dir_in=",
"debug",
"tz_in=",
"fake_csv=",
"file_out="])
except getopt.error as msg:
raise Usage(msg)
for opt, value in opts:
if opt in ("--help", "-h"):
print_(main.__doc__)
if opt in ("-d", "--debug"):
logging.basicConfig(format=FORMAT,
level=logging.DEBUG)
if opt in ("-d", "--dir_in"):
dir_in = value
if opt in ("-o", "--file_out"):
file_out = value
if opt in ("--fake_csv"):
fake_csv = value
if opt in ("--tz_in"):
os.environ['TZ'] = value
time.tzset()
try:
if dir_in:
merge_all_files_into_pytables(dir_in, file_out)
if fake_csv:
create_fake_csv(fake_csv)
except Exception:
error = "An unhandled error occured in the"
error += "data_source_tables_gen.py script."
error += "\n\nTraceback:\n"
error += '-' * 70 + "\n"
error += "".join(traceback.format_tb(sys.exc_info()[2]))
error += repr(sys.exc_info()[1]) + "\n"
error += str(sys.exc_info()[1]) + "\n"
error += '-' * 70 + "\n"
print_(error)
except Usage as err:
print_(err.msg)
print_("for help use --help")
return 2
if __name__ == "__main__":
sys.exit(main())
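# --- Added usage sketch (editorial; the paths below are invented) ---
# python data_source_tables_gen.py --fake_csv=/tmp/minute_bars.csv.gz
# python data_source_tables_gen.py --dir_in=/tmp/bars --tz_in=US/Eastern --file_out=/tmp/all.h5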
| apache-2.0 |
dongsenfo/pymatgen | pymatgen/analysis/graphs.py | 3 | 111497 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
import subprocess
import numpy as np
import os.path
import copy
from itertools import combinations
from pymatgen.core import Structure, Lattice, PeriodicSite, Molecule
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
from monty.json import MSONable
from monty.os.path import which
from operator import itemgetter
from collections import namedtuple, defaultdict
from scipy.spatial import KDTree
from scipy.stats import describe
import networkx as nx
import networkx.algorithms.isomorphism as iso
from networkx.readwrite import json_graph
from networkx.drawing.nx_agraph import write_dot
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "Matthew Horton, Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "August 2017"
ConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist')
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
For periodic graphs, the class stores, on each graph edge, the
lattice image that the edge connects to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()['graphs']
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if 'id' in d:
del d['id']
if 'key' in d:
del d['key']
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if 'to_jimage' in d:
d['to_jimage'] = tuple(d['to_jimage'])
if 'from_jimage' in d:
d['from_jimage'] = tuple(d['from_jimage'])
@classmethod
def with_empty_graph(cls, structure, name="bonds",
edge_weight_name=None,
edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError("Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless.")
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_edges(structure, edges):
"""
Constructor for StructureGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param structure: Structure object
:param edges: dict representing the bonds
(format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index,"
" from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError("Edges cannot be added if nodes are not"
" present in the graph. Please check your"
" indices.")
sg.add_edge(from_index, to_index, from_jimage=from_image,
to_jimage=to_image, weight=weight,
edge_properties=props)
sg.set_node_attributes()
return sg
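    # --- Added usage sketch (editorial, not part of pymatgen; the silicon
    # values below are illustrative only) ---
    # lattice = Lattice.cubic(5.43)
    # structure = Structure(lattice, ["Si", "Si"], [[0, 0, 0], [0.25, 0.25, 0.25]])
    # edges = {(0, 1, (0, 0, 0), (0, 0, 0)): {"weight": 2.35}}
    # sg = StructureGraph.with_edges(structure, edges)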
@staticmethod
def with_local_env_strategy(structure, strategy):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return:
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor['site_index'],
to_jimage=neighbor['image'],
weight=neighbor['weight'],
warn_duplicates=False)
return sg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph['name']
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph['edge_weight_name']
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph['edge_weight_units']
def add_edge(self, from_index, to_index,
from_jimage=(0, 0, 0), to_jimage=None,
weight=None, warn_duplicates=True,
edge_properties=None):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, "
"trying to automatically detect.")
dist, to_jimage = self.structure[from_index]\
.distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(self.structure[from_index]
.distance_and_image(self.structure[from_index],
jimage=image)[0])
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords,
dist,
dist*0.01,
include_index=True)
for site, dist, to_index in equiv_sites:
to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords)
to_jimage = to_jimage.astype(int)
self.add_edge(from_index=from_index, from_jimage=(0, 0, 0),
to_jimage=to_jimage, to_index=to_index)
return
# sanitize types
from_jimage, to_jimage = tuple(map(int, from_jimage)), tuple(map(int, to_jimage))
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn("Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index,
to_index,
to_jimage))
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index,
to_jimage=to_jimage,
weight=weight,
**edge_properties)
else:
self.graph.add_edge(from_index, to_index,
to_jimage=to_jimage,
**edge_properties)
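    # --- Added note (editorial): a periodic-image example of the call above. An
    # edge from site 0 in the home cell to site 1 in the cell shifted by +x could
    # be added as
    #   sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0), weight=2.5)
    # where the weight value is invented for illustration.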
def insert_node(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=False, site_properties=None, edges=None):
"""
A wrapper around Structure.insert(), which also incorporates the new
site into the StructureGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
:param validate_proximity: For Structure.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Structure
:param edges: List of dicts representing edges to be added to the
StructureGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.structure.insert(i, species, coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(edge["from_index"], edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None))
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, to_jimage=None,
new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, a ValueError is raised, since the image is needed to identify the edge unambiguously.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
))
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(self, index, func_grp, strategy, bond_order=1,
graph_dict=None, strategy_params=None):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
substituted will not place atoms too close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
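Example (illustrative sketch; the site index below is hypothetical
and assumes an existing StructureGraph `sg` with a terminal site
that can accept a methyl group)::

    from pymatgen.analysis.local_env import MinimumDistanceNN

    # replace the site at index 4 with the built-in "CH3" template,
    # determining the new bonds with the MinimumDistanceNN strategy
    sg.substitute_group(4, "CH3", MinimumDistanceNN)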
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
# By default, assume that all edges should remain
# inside the initial image
to_jimage = (0, 0, 0)
# default weight to None so the name is defined even when
# the edge does not specify a weight
weight = None
if "weight" in edge_props.keys():
    weight = edge_props["weight"]
    del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], to_jimage=to_jimage,
              weight=weight, edge_properties=edge_props)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor['site_index'],
to_jimage=neighbor['image'],
weight=neighbor['weight'],
warn_duplicates=False)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
site, jimage, index, weight, dist.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
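Example (illustrative sketch, assuming an existing
StructureGraph `sg`)::

    for cs in sg.get_connected_sites(0):
        print(cs.index, cs.jimage, cs.weight, cs.dist)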
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d['to_jimage']
if dir == 'in':
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get('weight', None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site,
jimage=to_jimage,
index=v,
weight=weight,
dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(self, filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp"):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires "
"GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587
+ c[2] * 0.114) / 255 < 0.5 else '#ffffff'
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,
fontname="Helvetica-bold", style="filled", shape="circle")
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d['to_jimage']
# set edge style
d['style'] = "solid"
if to_image != (0, 0, 0):
d['style'] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d['arrowhead'] = "none"
# only add labels for images that are not the origin
if image_labels:
d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d['arrowhead'] = "normal" if d['headlabel'] else "none"
# optionally color edges using node colors
color_u = g.node[u]['fillcolor']
color_v = g.node[v]['fillcolor']
d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get('edge_weight_units', "")
if d.get('weight'):
d['label'] = "{:.2f} {}".format(d['weight'], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d['to_jimage']) in diff['self']:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d['to_jimage']) in diff['other']:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({'color_uv': '#00ff00'})
for u, v, k in red_edges:
g.edges[u, v, k].update({'color_uv': '#ff0000'})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename+".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename+".dot"]
rs = subprocess.Popen(args,
stdout=f,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename+".dot")
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d['weight'])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
:return: A dict with an 'all_weights' list and 'min', 'max',
'mean' and 'variance' summary statistics
"""
all_weights = [d.get('weight', None) for u, v, d
in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy='omit')
return {
'all_weights': all_weights,
'min': stats.minmax[0],
'max': stats.minmax[1],
'mean': stats.mean,
'variance': stats.variance
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string
for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: 'A'}
available_letters = [chr(66+i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = 'A'
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = '{}-{}'.format(centre_sp, ','.join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
graph information is stored using NetworkX's
`adjacency_data` format.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph)}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
graph information is restored from NetworkX's
`adjacency_data` format.
"""
s = Structure.from_dict(d['structure'])
return cls(s, d['graphs'])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
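Example (illustrative sketch, assuming an existing
StructureGraph `sg`; note that full 3x3 scaling matrices are not
yet supported)::

    supercell_sg = sg * (2, 2, 2)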
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
# computationally expensive than just keeping track of
# which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError('Not tested with 3x3 scaling matrices yet.')
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(site.species, site.coords + v,
new_lattice, properties=site.properties,
coords_are_cartesian=True, to_unit_cell=False)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True)
if d['to_jimage'] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d['to_jimage'] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
# new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d['to_jimage'] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for every one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image%1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int))
new_d['to_jimage'] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove),
len(edges_to_add)))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
    new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g)}
sg = StructureGraph.from_dict(d)
return sg
def __rmul__(self, other):
return self.__mul__(other)
def _edges_to_string(self, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-"*max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0,1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))),
data.get("weight", 0))
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v,
str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx:self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
    self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d['to_jimage'])
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d['to_jimage'])
for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and \
(self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Specie strings, will not count
number of occurrences of bonds
:return:
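Example (illustrative sketch, assuming two StructureGraphs
`sg_a` and `sg_b` built from the same Structure)::

    comparison = sg_a.diff(sg_b)
    print(comparison['dist'])       # Jaccard distance
    print(len(comparison['both']))  # edges present in both graphs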
"""
if self.structure != other.structure and strict:
raise ValueError("Meaningless to compare StructureGraphs if "
"corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d['to_jimage'])
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d['to_jimage'])
for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {(str(self.structure[u].specie),
str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(str(other.structure[u].specie),
str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
'self': edges - edges_other,
'other': edges_other - edges,
'both': edges.intersection(edges_other),
'dist': jaccard_dist
}
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
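Example (illustrative sketch, assuming `sg` is a StructureGraph
of a molecular crystal)::

    molecules = sg.get_subgraphs_as_molecules()
    for mol in molecules:
        print(mol.composition.reduced_formula)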
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, '_supercell_sg', None) is None:
self._supercell_sg = supercell_sg = self*(3,3,3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
# discount subgraphs that lie across *supercell* boundaries
# these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
class MolGraphSplitError(Exception):
# Raised when a molecule graph fails to split into two disconnected
# subgraphs
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated Molecule object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()['graphs']
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if 'id' in d:
del d['id']
if 'key' in d:
del d['key']
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if 'to_jimage' in d:
d['to_jimage'] = tuple(d['to_jimage'])
if 'from_jimage' in d:
d['from_jimage'] = tuple(d['from_jimage'])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds",
edge_weight_name=None,
edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError("Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless.")
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
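Example (illustrative sketch; the water geometry below is
approximate and for demonstration only)::

    from pymatgen.core.structure import Molecule

    water = Molecule(["O", "H", "H"],
                     [[0.00, 0.00, 0.00],
                      [0.76, 0.59, 0.00],
                      [-0.76, 0.59, 0.00]])
    mg = MoleculeGraph.with_edges(water, {(0, 1): None, (0, 2): None})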
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index) "
                 "tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError("Edges cannot be added if nodes are not"
" present in the graph. Please check your"
" indices.")
mg.add_edge(from_index, to_index, weight=weight,
edge_properties=props)
mg.set_node_attributes()
return mg
@staticmethod
def with_local_env_strategy(molecule, strategy, reorder=True,
extend_structure=True):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param reorder: bool, representing if graph nodes need to be reordered
following the application of the local_env strategy
:param extend_structure: If True (default), then a large artificial box
will be placed around the Molecule, because some strategies assume
periodic boundary conditions.
:return: mg, a MoleculeGraph
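Example (illustrative sketch; the methane geometry is approximate,
and MinimumDistanceNN is only one of several possible strategies)::

    from pymatgen.core.structure import Molecule
    from pymatgen.analysis.local_env import MinimumDistanceNN

    methane = Molecule(["C", "H", "H", "H", "H"],
                       [[0.00, 0.00, 0.00],
                        [0.63, 0.63, 0.63],
                        [-0.63, -0.63, 0.63],
                        [-0.63, 0.63, -0.63],
                        [0.63, -0.63, -0.63]])
    mg = MoleculeGraph.with_local_env_strategy(methane,
                                               MinimumDistanceNN())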
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
molecule = molecule.get_boxed_structure(a, b, c, no_cross=True)
for n in range(len(molecule)):
neighbors = strategy.get_nn_info(molecule, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor['image'], [0, 0, 0]):
continue
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another from site v to site u: this is
# harmless, so warn_duplicates=False
mg.add_edge(from_index=n,
to_index=neighbor['site_index'],
weight=neighbor['weight'],
warn_duplicates=False)
if reorder:
# Reverse order of nodes to match with molecule
n = len(mg.molecule)
mapping = {i: (n-i) for i in range(n)}
mapping = {i: (j-1) for i, j in mapping.items()}
mg.graph = nx.relabel_nodes(mg.graph, mapping)
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph['name']
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph['edge_weight_name']
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph['edge_weight_units']
def add_edge(self, from_index, to_index,
weight=None, warn_duplicates=True,
edge_properties=None):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index
can be swapped with to_index; indices will always be
reordered so that from_index < to_index.
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn("Trying to add an edge that already exists from "
"site {} to site {}.".format(from_index,
to_index))
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index,
weight=weight,
**edge_properties)
else:
self.graph.add_edge(from_index, to_index,
**edge_properties)
def insert_node(self, i, species, coords, validate_proximity=False,
site_properties=None, edges=None):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(i, species, coords,
validate_proximity=validate_proximity,
properties=site_properties)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(edge["from_index"], edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None))
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index,
new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index,
from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
))
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def split_molecule_subgraphs(self, bonds, allow_reverse=False,
alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function not only alters the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
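Example (illustrative sketch; the bond indices are hypothetical
and assume a MoleculeGraph `mg` of an ethane-like molecule whose
C-C bond connects sites 0 and 1)::

    fragments = mg.split_molecule_subgraphs([(0, 1)],
                                            allow_reverse=True)
    # returns two MoleculeGraphs, one per methyl-like fragment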
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError("Cannot split molecule; \
MoleculeGraph is still connected.")
else:
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] \
if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight,
new_edge_properties=edge_properties)
else:
original.alter_edge(u, v,
new_edge_properties=alterations[(u, v)])
sub_mols = []
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
components = nx.weakly_connected_components(original.graph)
subgraphs = [original.graph.subgraph(c) for c in components]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i in range(len(nodes)):
mapping[nodes[i]] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
# iterate over a copy, since deleting from a dict while
# iterating over it is an error in Python 3
for k, v in list(properties.items()):
    if len(v) != len(species):
        del properties[k]
new_mol = Molecule(species, coords, charge=charge,
site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
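Example (illustrative sketch, assuming an existing
MoleculeGraph `mg`)::

    fragments = mg.build_unique_fragments()
    print(len(fragments))  # number of unique connected fragments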
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
nm = iso.categorical_node_match("specie", "ERROR")
# find all possible fragments, aka connected induced subgraphs
all_fragments = []
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
all_fragments.append(subgraph)
# narrow to all unique fragments using graph isomorphism
unique_fragments = []
for fragment in all_fragments:
if not any(nx.is_isomorphic(fragment, f, node_match=nm)
           for f in unique_fragments):
unique_fragments.append(fragment)
# convert back to molecule graphs
unique_mol_graphs = []
for fragment in unique_fragments:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graphs.append(self.with_edges(Molecule(species=species,
coords=coords,
charge=self.molecule.charge),
edges))
return unique_mol_graphs
def substitute_group(self, index, func_grp, strategy, bond_order=1,
graph_dict=None, strategy_params=None, reorder=True,
extend_structure=True):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
This is because of the reordering that occurs when using some of the
local_env strategies.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:param reorder: bool, representing if graph nodes need to be reordered
following the application of the local_env strategy
:param extend_structure: If True (default), then a large artificial box
will be placed around the Molecule, because some strategies assume
periodic boundary conditions.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule,
bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v],
weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except:
raise RuntimeError("Can't find functional group in list. "
"Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
# default weight to None so the name is defined even when
# the edge does not specify a weight
weight = None
if "weight" in edge_props.keys():
    weight = edge_props["weight"]
    del edge_props["weight"]
self.add_edge(mapping[u], mapping[v],
              weight=weight, edge_properties=edge_props)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat, reorder=reorder,
extend_structure=extend_structure)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If the functional-group graph is not zero-indexed, shift its indices to match the mapping
u, v = (u-1), (v-1)
self.add_edge(mapping[u], mapping[v],
weight=weight, edge_properties=edge_props)
def replace_group(self, index, func_grp, strategy, bond_order=1,
graph_dict=None, strategy_params=None, reorder=True,
extend_structure=True):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecie X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:param reorder: bool, representing if graph nodes need to be reordered
following the application of the local_env strategy
:param extend_structure: If True (default), then a large artificial box
will be placed around the Molecule, because some strategies assume
periodic boundary conditions.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(index, func_grp, strategy,
bond_order=bond_order, graph_dict=graph_dict,
strategy_params=strategy_params,
reorder=reorder,
extend_structure=extend_structure)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError("Currently, functional group replacement "
                   "cannot occur at an atom within a ring "
                   "structure.")
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(index, func_grp, strategy,
bond_order=bond_order, graph_dict=graph_dict,
strategy_params=strategy_params,
reorder=reorder,
extend_structure=extend_structure)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: list of cycles, where each cycle is given as a list of
edges (u, v) traversing the ring. If `including` is provided,
only cycles containing at least one of the specified site
indices are returned; otherwise all unique cycles are returned.
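Example (illustrative sketch, assuming a MoleculeGraph `mg` of a
benzene-like molecule)::

    rings = mg.find_rings(including=[0])
    for ring in rings:
        print(ring)  # list of (u, v) edges forming the cycle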
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i-1], e))
cycles_edges.append(edges)
return cycles_edges
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
site, jimage, index, weight, dist.
Index is the index of the corresponding site
in the original Molecule, weight can be
None if not defined.
:param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d in out_edges + in_edges:
weight = d.get('weight', None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site,
jimage=(0, 0, 0),
index=u,
weight=weight,
dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site,
jimage=(0, 0, 0),
index=v,
weight=weight,
dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
        number_of_self_loops = sum(1 for u, v in self.graph.edges(n) if u == v)
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(self, filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp"):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
        :param diff (MoleculeGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
            raise RuntimeError("MoleculeGraph graph drawing requires "
"GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587
+ c[2] * 0.114) / 255 < 0.5 else '#ffffff'
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,
fontname="Helvetica-bold", style="filled", shape="circle")
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
            if "to_jimage" in d:
to_image = d['to_jimage']
else:
to_image = (0, 0, 0)
# set edge style
d['style'] = "solid"
if to_image != (0, 0, 0):
d['style'] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d['arrowhead'] = "none"
# only add labels for images that are not the origin
if image_labels:
d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d['arrowhead'] = "normal" if d['headlabel'] else "none"
# optionally color edges using node colors
color_u = g.node[u]['fillcolor']
color_v = g.node[v]['fillcolor']
d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get('edge_weight_units', "")
if d.get('weight'):
d['label'] = "{:.2f} {}".format(d['weight'], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, degree in g.degree() if degree != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d['to_jimage']) in diff['self']:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d['to_jimage']) in diff['other']:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({'color_uv': '#00ff00'})
for u, v, k in red_edges:
g.edges[u, v, k].update({'color_uv': '#ff0000'})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename+".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename+".dot"]
rs = subprocess.Popen(args,
stdout=f,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename+".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph)}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d['molecule'])
return cls(m, d['graphs'])
def _edges_to_string(self, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-"*max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))),
data.get("weight", 0))
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v,
str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d['to_jimage'] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
        # Site coordinates provide a convenient hashable key for
        # matching sites between the two Molecules
        try:
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v)
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and \
(self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if self.molecule.composition != other.molecule.composition:
return False
else:
self_undir = self.graph.to_undirected()
other_undir = other.graph.to_undirected()
nm = iso.categorical_node_match("specie", "ERROR")
isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)
return isomorphic
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Specie strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
        raise ValueError("Meaningless to compare MoleculeGraphs if "
                         "corresponding Molecules are different.")
if strict:
# sort for consistent node indices
            # Site coordinates provide a convenient hashable key for
            # matching sites between the two Molecules (Molecule sites
            # have no frac_coords)
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
            other_sorted = other.__copy__()
            other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get('to_jimage', (0, 0, 0)))
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d.get('to_jimage', (0, 0, 0)))
for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {(str(self.molecule[u].specie),
str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)}
            edges_other = {(str(other.molecule[u].specie),
                            str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
'self': edges - edges_other,
'other': edges_other - edges,
'both': edges.intersection(edges_other),
'dist': jaccard_dist
}
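# ---------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API above): shows
# how the inspection methods defined in this class fit together. It
# assumes the alternate constructors defined earlier in this module
# (with_empty_graph / add_edge) and uses an approximate water geometry
# purely as example data; GraphViz is only needed for the commented-out
# draw_graph_to_file call.
if __name__ == "__main__":
    from pymatgen.core.structure import Molecule

    water = Molecule(["O", "H", "H"],
                     [[0.00, 0.00, 0.00],
                      [0.76, 0.59, 0.00],
                      [-0.76, 0.59, 0.00]])
    mg = MoleculeGraph.with_empty_graph(water, name="bonds")
    mg.add_edge(0, 1)
    mg.add_edge(0, 2)

    # each ConnectedSite records site, jimage, index, weight and dist
    for connected_site in mg.get_connected_sites(0):
        print(connected_site.index, connected_site.dist)
    print(mg.get_coordination_of_site(0))  # degree of the O node -> 2
    print(mg.find_rings())                 # water has no rings -> []

    # serialization round-trip and graph comparison; diff() reports the
    # Jaccard distance 1 - |intersection| / |union| of the edge sets
    mg2 = MoleculeGraph.from_dict(mg.as_dict())
    print(mg.isomorphic_to(mg2))           # True
    print(mg.diff(mg2)["dist"])            # identical edges -> 0.0
    # mg.draw_graph_to_file("water.png", algo="neato")  # needs GraphViz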
| mit |
alshedivat/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 16 | 13781 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
from six.moves import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
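# Hedged sketch (never called by the script): what `parse` produces for a
# single CSV line, assuming eager execution is enabled as in the rest of
# this example. The color line below is illustrative data only.
def _demo_parse():
  rgb, chars, length = parse("white,255,255,255")
  # rgb:    [1., 1., 1.]  (RGB scaled into [0, 1])
  # chars:  shape (5, 256), one one-hot row per character of "white"
  # length: 5
  return rgb, chars, length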
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 3. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
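# Hedged sketch of consuming one padded batch from `load_dataset`. The
# shapes in the comments follow from the padded_batch call above; the
# batch size of 4 is arbitrary and SOURCE_TRAIN_URL is defined further
# down in this file (it is resolved at call time, not at definition time).
def _demo_load_dataset(data_dir):
  dataset = load_dataset(data_dir, SOURCE_TRAIN_URL, batch_size=4)
  for labels, chars, lengths in tfe.Iterator(dataset):
    # labels: (4, 3) RGB targets; chars: (4, max_len, 256) padded one-hots;
    # lengths: (4,) true, unpadded sequence lengths
    return labels, chars, lengths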
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
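# Rough usage sketch for the model above (not executed by the script):
# build a small RNNColorbot and run one forward pass on a dummy one-hot
# sequence. Shapes follow the documentation of `call`; the layer sizes
# chosen here are arbitrary.
def _demo_forward_pass():
  model = RNNColorbot(rnn_cell_sizes=[32, 16], label_dimension=3,
                      keep_prob=0.5)
  chars = tf.one_hot([[ord("r"), ord("e"), ord("d")]], depth=256)  # (1, 3, 256)
  lengths = tf.constant([3], dtype=tf.int64)
  return model((chars, lengths), training=False)  # (1, 3) predicted RGB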
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
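# The loop above binds the current batch into a zero-argument callable so
# that optimizer.minimize can re-evaluate the loss; a stripped-down,
# illustrative version of that binding pattern using the module-level
# `loss` defined earlier:
def _demo_partial_loss():
  labels = tf.constant([[1.0, 0.0, 0.0]])
  predictions = tf.constant([[0.9, 0.1, 0.0]])
  batch_loss = functools.partial(loss, labels, predictions)
  return batch_loss()  # no-argument callable, as minimize() expects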
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Team-T2-NMR/NMR-T2 | readinput.py | 1 | 1550 | import numpy
import scipy
import sklearn
#filename = 'input_data_1.txt'
#matrix = []
#input_matrix = []
#----------------------------------------
#matrix[i] is the different spectrum |
#matrix[i][0] is Spectrum ID |
#matrix[i][1] is Database ID |
#matrix[i][2] is InChi Key |
#matrix[i][3] is Solvent Sample |
#matrix[i][4] is pH |
#matrix[i][5] is Frequency |
#matrix[i][7] is Atom number |
#matrix[i][8] is Shift |
#----------------------------------------
def open_file_init(filename):
matrix = []
f = open(filename,'r')
text1 = f.readlines()
filesize = len(text1)
#print len(text1)
for i in range(filesize):
matrix.append(text1[i].split())
#print(matrix)
proton_size = len(matrix[i])
for i in range(filesize):
#print matrix[i]
for k in range(proton_size):
if (k+1 == proton_size):
break
#print matrix[i][k+1]
matrix[i][k+1] = float(matrix[i][k+1])
#matrix[i][k+1] = format(matrix[i][k+1],'.8f')
print matrix[i]
#print matrix
return matrix
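#----------------------------------------
#Usage sketch (hedged): the path below |
#is a placeholder, mirroring the |
#commented-out call further down. |
#----------------------------------------
#spectra = open_file_init('input_data_1.txt')
#print spectra[0][0] # Spectrum ID of the first spectrum
#print spectra[0][8] # Shift value of the first spectrum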
#open_file_init(filename)
# for i in range(20):
# temp = []
#print matrix[i][5]
# temp.append(int(matrix[i][5]))
# temp.append(int(matrix[i][7]))
# train_matrix.append(temp)
# #train_matrix.append(temp)
# print train_matrix
# print len(train_matrix)
#def classify():
# k =1
# | gpl-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/frame/test_nonunique_indexes.py | 1 | 18506 | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pytest
from pandas.compat import lrange, u
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range('20130101', periods=4, freq='Q-NOV')
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['a', 'a', 'a', 'a'])
df.columns = idx
expected = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['foo', 'bar', 'foo', 'hello'])
df['string'] = 'bah'
expected = DataFrame([[1, 1, 1, 5, 'bah'], [1, 1, 2, 5, 'bah'],
[2, 1, 3, 5, 'bah']],
columns=['foo', 'bar', 'foo', 'hello', 'string'])
check(df, expected)
with pytest.raises(ValueError, match='Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1, 1, 1, 5, 'bah', 3], [1, 1, 2, 5, 'bah', 3],
[2, 1, 3, 5, 'bah', 3]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1, 1, 1, 5, 'bah', 4], [1, 1, 2, 5, 'bah', 4],
[2, 1, 3, 5, 'bah', 4]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1, 1, 5, 'bah', 3], [1, 2, 5, 'bah', 3],
[2, 3, 5, 'bah', 3]],
columns=['foo', 'foo', 'hello', 'string', 'foo2'])
check(df, expected)
# try to delete again (its not consolidated)
del df['hello']
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# insert
df.insert(2, 'new_col', 5.)
expected = DataFrame([[1, 1, 5., 'bah', 3], [1, 2, 5., 'bah', 3],
[2, 3, 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col', 'string',
'foo2'])
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match='cannot insert'):
df.insert(2, 'new_col', 4.)
df.insert(2, 'new_col', 4., allow_duplicates=True)
expected = DataFrame([[1, 1, 4., 5., 'bah', 3],
[1, 2, 4., 5., 'bah', 3],
[2, 3, 4., 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col',
'new_col', 'string', 'foo2'])
check(df, expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4., 5., 'bah', 3], [4., 5., 'bah', 3],
[4., 5., 'bah', 3]],
columns=['new_col', 'new_col', 'string', 'foo2'])
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame([[1, 1, 1., 5], [1, 1, 2., 5], [2, 1, 3., 5]],
columns=['foo', 'bar', 'foo', 'hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1, 1, 1., 5, 7.], [1, 1, 2., 5, 7.],
[2, 1, 3., 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
result = df['foo']
expected = DataFrame([[1, 1.], [1, 2.], [2, 3.]],
columns=['foo', 'foo'])
check(result, expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
del df['foo']
expected = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], columns=[
'bar', 'hello', 'foo2'])
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=['x', 'x'])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{'RT': [0.0454],
'TClose': [22.02],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'RPT_Date': [20120930, 20121231, 20130331],
'STK_ID': [600809] * 3,
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples(
[(600809, 20120930),
(600809, 20121231),
(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4, df5, how='inner', left_index=True, right_index=True)
result = k.rename(
columns={'TClose_x': 'TClose', 'TClose_y': 'QT_Close'})
str(result)
result.dtypes
expected = (DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809,
u('饡驦'), 30.01]],
columns=['RT', 'TClose', 'TExg',
'RPT_Date', 'STK_ID', 'STK_Name',
'QT_Close'])
.set_index(['STK_ID', 'RPT_Date'], drop=False))
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
pytest.raises(ValueError, df.reindex, columns=['bar'])
pytest.raises(ValueError, df.reindex, columns=['bar', 'foo'])
# drop
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
result = df.drop(['a'], axis=1)
expected = DataFrame([[1], [1], [1]], columns=['bar'])
check(result, expected)
result = df.drop('a', axis=1)
check(result, expected)
# describe
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['bar', 'a', 'a'], dtype='float64')
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame({'A': expected_ser,
'B': this_df['B'],
'A': expected_ser},
columns=['A', 'B', 'A'])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__', '__mul__', '__sub__', '__truediv__']:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ['A', 'A']
df.columns = ['A', 'A']
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop('C', axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=['A', 'B', 'C'], how='all')
expected.columns = ['A', 'A', 'B', 'C']
df.columns = ['A', 'A', 'B', 'C']
result = df.dropna(subset=['A', 'C'], how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
pytest.raises(ValueError, lambda: df[df.A > 6])
# dup aligining operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]],
columns=['A', 'B'])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]],
columns=['A', 'A'])
# not-comparing like-labelled
pytest.raises(ValueError, lambda: df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False, True], [True, False], [False, False], [
True, False]], columns=['A', 'A'])
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one': Series([True, True, False],
index=['a', 'b', 'c']),
'two': Series([False, False, True, False],
index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True],
index=['a', 'b', 'c', 'd'])})
expected = pd.concat(
[dfbool['one'], dfbool['three'], dfbool['one']], axis=1)
result = dfbool[['one', 'three', 'one']]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.loc[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.loc[['a', 'c', 'a']]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': np.arange(1, 6, dtype='int64')},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame({'A': date_range('20130101', periods=5),
'B': date_range('20130101 09:00:00', periods=5)},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(pd.Timedelta('9 hours'), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['a', 'a.1']
str(df)
expected = DataFrame([[1, 2]], columns=['a', 'a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a'])
df.columns = ['b', 'a', 'a.1']
str(df)
expected = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['b', 'b']
str(df)
expected = DataFrame([[1, 2]], columns=['b', 'b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=['a', 'a', 'b', 'b', 'd', 'c', 'c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame(
[[1, 2, 1., 2., 3., 'foo', 'bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
pytest.raises(Exception, lambda x: DataFrame(
[[1, 2, 'foo', 'bar']], columns=['a', 'a', 'a', 'a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype='float64')
df_int = DataFrame(np.random.randn(10, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index,
columns=df_float.columns)
df_object = DataFrame('foo', index=df_float.index,
columns=df_float.columns)
df_dt = DataFrame(pd.Timestamp('20010101'),
index=df_float.index,
columns=df_float.columns)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_values_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list('AAA')
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, 'A', ['g', 'h', 'i'], allow_duplicates=True)
df.insert(0, 'A', ['d', 'e', 'f'], allow_duplicates=True)
df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
exp = pd.DataFrame([['a', 'd', 'g'], ['b', 'e', 'h'],
['c', 'f', 'i']], columns=['A', 'A', 'A'])
assert_frame_equal(df, exp)
| bsd-3-clause |
caseyclements/bokeh | examples/compat/mpl/listcollection.py | 34 | 1602 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates.
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
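# Quick illustration (hedged, not called by the script) of what
# make_segments produces: for N points it returns an array of shape
# (N - 1, 2, 2), one [start, end] pair per segment, which is the format
# LineCollection expects.
def _demo_segments():
    xs = np.array([0.0, 1.0, 2.0])
    ys = np.array([0.0, 1.0, 0.0])
    segs = make_segments(xs, ys)
    assert segs.shape == (2, 2, 2)
    return segs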
def colorline(x, y, colors=None, linewidth=3, alpha=1.0):
'''
Plot a line with segments.
Optionally, specify segments colors and segments widths.
'''
# Make a list of colors cycling through the rgbcmyk series.
# You have several ways to input the colors:
# colors = ['r','g','b','c','y','m','k']
# colors = ['red','green','blue','cyan','yellow','magenta','black']
# colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000']
# colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0),
# (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)]
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
widths = [5, 10, 20, 40, 20, 10, 5]
segments = make_segments(x, y)
lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
# Colored sine wave
x = np.linspace(0, 4 * np.pi, 100)
y = np.sin(x)
colorline(x, y)
plt.title("MPL support for ListCollection in Bokeh")
plt.xlim(x.min(), x.max())
plt.ylim(-1.0, 1.0)
output_file("listcollection.html")
show(mpl.to_bokeh())
| bsd-3-clause |
cybernet14/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
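# Side note (illustrative sketch, not part of the original example): both
# estimators also expose the fitted shrinkage coefficient directly after a
# single fit; this helper is never called here.
def _single_fit_shrinkage(X):
    lw = LedoitWolf(assume_centered=True).fit(X)
    oa = OAS(assume_centered=True).fit(X)
    return lw.shrinkage_, oa.shrinkage_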
| bsd-3-clause |
CVML/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
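# Hedged sketch of the invariant these helpers check: after
# _bistochastic_normalize, every row and every column sums to (roughly)
# the same constant. Not collected by pytest (no `test_` prefix).
def _demo_bistochastic_invariant():
    rng = np.random.RandomState(0)
    mat = rng.rand(20, 20)
    scaled = _bistochastic_normalize(mat)
    row_sums = scaled.sum(axis=1)
    col_sums = scaled.sum(axis=0)
    return row_sums.mean(), col_sums.mean()  # approximately equal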
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/series/methods/test_shift.py | 3 | 13266 | import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
tm.assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_series_equal(result, expected)
# GH#8260
# with tz
s = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
)
result = s - s.shift()
exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
msg = "DatetimeArray subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
s - s2
def test_shift2(self):
ts = Series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
msg = "Cannot shift with no freq"
with pytest.raises(NullFrequencyError, match=msg):
idx.shift(1)
def test_shift_fill_value(self):
# GH#24128
ts = Series(
[1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = Series(
[0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_series_equal(result, exp)
exp = Series(
[0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(2, fill_value=0.0)
tm.assert_series_equal(result, exp)
ts = Series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert res.dtype == ts.dtype
def test_shift_categorical_fill_value(self):
ts = Series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = Series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = "'fill_value=f' is not present in this Categorical's categories"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_dst(self):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_series):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
tm.assert_series_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_series.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(datetime_series, unshifted)
shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
tm.assert_series_equal(shifted, shifted2)
inferred_ts = Series(
datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
)
shifted = inferred_ts.tshift(1)
expected = datetime_series.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, inferred_ts)
no_freq = datetime_series[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_series):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_series.tshift()
def test_period_index_series_shift_with_freq(self):
ps = tm.makePeriodSeries()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_series_equal(shifted, shifted2)
shifted3 = ps.shift(freq=BDay())
tm.assert_series_equal(shifted, shifted3)
def test_datetime_series_shift_with_freq(self, datetime_series):
shifted = datetime_series.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(datetime_series, unshifted)
shifted2 = datetime_series.shift(freq=datetime_series.index.freq)
tm.assert_series_equal(shifted, shifted2)
inferred_ts = Series(
datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_series.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(unshifted, inferred_ts)
def test_period_index_series_shift_with_freq_error(self):
ps = tm.makePeriodSeries()
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_series_shift_with_freq_error(self, datetime_series):
no_freq = datetime_series[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
def test_shift_int(self, datetime_series):
ts = datetime_series.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_series_equal(shifted, expected)
def test_shift_object_non_scalar_fill(self):
# shift requires scalar fill_value except for object dtype
ser = Series(range(3))
with pytest.raises(ValueError, match="fill_value must be a scalar"):
ser.shift(1, fill_value=[])
df = ser.to_frame()
with pytest.raises(ValueError, match="fill_value must be a scalar"):
df.shift(1, fill_value=np.arange(3))
obj_ser = ser.astype(object)
result = obj_ser.shift(1, fill_value={})
assert result[0] == {}
obj_df = obj_ser.to_frame()
result = obj_df.shift(1, fill_value={})
assert result.iloc[0, 0] == {}
def test_shift_categorical(self):
# GH#9416
s = Series(["a", "b", "c", "d"], dtype="category")
tm.assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
tm.assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
tm.assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
tm.assert_index_equal(s.values.categories, sp1.values.categories)
tm.assert_index_equal(s.values.categories, sn2.values.categories)
def test_shift_dt64values_int_fill_deprecated(self):
# GH#31971
ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
with tm.assert_produces_warning(FutureWarning):
result = ser.shift(1, fill_value=0)
expected = Series([pd.Timestamp(0), ser[0]])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("periods", [1, 2, 3, 4])
def test_shift_preserve_freqstr(self, periods):
# GH#21275
ser = Series(
range(periods),
index=pd.date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
)
result = ser.shift(1, "2H")
expected = Series(
range(periods),
index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_data, output_data",
[(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
)
def test_shift_non_writable_array(self, input_data, output_data):
# GH21049 Verify whether non writable numpy array is shiftable
input_data.setflags(write=False)
result = Series(input_data).shift(1)
expected = Series(output_data, dtype="float64")
tm.assert_series_equal(result, expected)
| bsd-3-clause |
kknox/rocBLAS | clients/benchmarks/perf_script/plotPerformance.py | 3 | 1743 | # ########################################################################
# Copyright 2016 Advanced Micro Devices, Inc.
#
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib.pyplot as plt
import pylab
from matplotlib.backends.backend_pdf import PdfPages
os.system( "grep NT sgemm.txt > sgemm_NT.csv" )
input = open ('sgemm_NT.csv', 'r')
x = []
y = []
shape = ''
for line in input:
line = line.replace("(", ",")
line = line.replace(")", ",")
value = line.split(',')
x.append(value[1])
y.append(value[7])
shape = value[0]
#print value
f = plt.figure()
plt.rcParams.update({'font.size':20})
plt.xlabel('M=N=K')
plt.ylabel("Gflop/s")
plt.title('rocBLAS SGEMM ' + shape)
plt.yticks()
plt.grid(True)
plt.legend( loc = 2)
plot1 = plt.plot(x, y)
f.savefig("sgemm.pdf", bbox_inches='tight')
input.close()
| mit |
soumyajitpaul/Soumyajit-Github-Byte-3 | explore.py | 10 | 6276 | import httplib2
from apiclient.discovery import build
import urllib
import json
import csv
import matplotlib.pyplot as plt
# This API key is provided by google as described in the tutorial
API_KEY = '... add your own ...'
# This is the table id for the fusion table
TABLE_ID = '... add your own ...'
# open the data stored in a file called "data.json"
try:
fp = open("data.json")
response = json.load(fp)
# but if that file does not exist, download the data from fusiontables
except IOError:
service = build('fusiontables', 'v1', developerKey=API_KEY)
query = "SELECT * FROM " + TABLE_ID + " WHERE AnimalType = 'DOG'"
response = service.query().sql(sql=query).execute()
fp = open("data.json", "w+")
json.dump(response, fp)
# this will be our summary of the data. For each column name, it will store
# a dictionary containing the number of occurrences of each possible
# value for that column in the data. For example, for gender,
# the possible values are "MALE" and "FEMALE" and "UNKNOWN"
# summary will contain {"MALE": 5199, "FEMALE": 4354, "UNKNOWN":82}
# indicating that in the data, 5199 rows are marked as MALE,
# 4354 rows are marked as FEMALE and 82 rows are marked as UNKNOWN
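# A hedged illustration (editor's addition) of the nested structure the code
# below builds; the gender counts repeat the example above, everything else
# (column names, value names, counts) is made up:
#   summary = {"Gender": {"MALE": 5199, "FEMALE": 4354, "UNKNOWN": 82},
#              "Color": {"BLACK": 1200, "BROWN": 950, "WHITE": 640}}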
summary = {}
columns = response['columns'] # the names of all columns
rows = response['rows'] # the actual data
# how many rows are in the data we downloaded?
# this should be the same as in the fusion table
print len(rows)
# we'll ignore some columns because they are
# not useful for our analysis (such as AnimalID and Name which
# are unique for every animal
ignore = [u'AnimalID', u'AnimalType', u'Name', u'IconName', u'icon type']
# now we want to summarize the data to facilitate exploration. To do
# so we will collect information about each *column* in the spreadsheet
for i in range(0, len(columns)): # loops through each column
# skip the rest of this loop if it's an ignore column
if columns[i] in ignore: continue
# will store unique values for this column
column_values = {}
# the name for this column
column_name = columns[i]
# loop through all of the rows of data
for row in rows:
# get the value stored for this column in this row
value = row[i]
# convert any string values to ascii, and any empty strings
# to a string called 'EMPTY' we can use as a value
if type(value) is unicode: value = row[i].encode('ascii','ignore')
if value == '': value = 'EMPTY'
if value == 'NaN' : value = 'EMPTY'
# increase the count the value already exists
try:
column_values[value] = column_values[value] + 1
# or set it to 1 if it does not exist
except KeyError:
column_values[value] = 1
# to facilitate exploration we want to also write our summary
# information for each column to disk in a csv file
fc = open("{0}.csv".format(column_name), "w+")
cwriter = csv.writer(fc)
cwriter.writerow(["name", "amount"])
# store the result in summary
summary[column_name] = column_values
# we also want to write summary information for the whole data set
# containing the name of each column, the max rows in any value for that column
# the min rows, the number of rows without a value, and the number of
# values only present in a single row ('unique')
fp = open("summary.csv", "w+")
headers = ["name", "max", "min", "empty", "unique"]
writer = csv.writer(fp)
dict_writer = csv.DictWriter(fp, headers)
writer.writerow(headers)
# to collect that data, we need to loop through the summary
# data we just created for each column. column_name is the column name,
# details is the dictionary containing {column_value: numrows, ...}
for column_name, details in summary.iteritems():
# rowcounts is a list containing the numrows numbers, but
# no column value names
rowcounts = details.values()
max_count = max(rowcounts)
min_count = min(rowcounts)
# we also want to know specifically how many rows had no
# entry for this column
try:
emptyrowcounts = details["EMPTY"]
# and if that throws an error, we know that no rows were empty
except KeyError:
emptyrowcounts = 0
# for a sanity check we print this out to the screen
print("column {0} has {1} different keys of which the 'EMPTY' key holds {2} values".format(column_name, len(details), emptyrowcounts))
# we can also calculate fun things like the number of
# column values associated with only a single row
unique = 0
for numrows in details.itervalues():
if numrows == 1:
unique = unique + 1
# as a second sanity check, let's write this out to a csv summary file
row = {"name": column_name, "max": max_count, "min": min_count, "empty": emptyrowcounts,
"unique":unique}
dict_writer.writerow(row)
# now we will write this all to a csv file:
# we loop through the different possible
# column values, and write out how many rows
# had that value.
for column_value, numrows in details.iteritems():
# and write just the values for this out as a csv file
fc = open("{0}.csv".format(column_name), "a+")
kdict_writer = csv.DictWriter(fc, ["name", "amount"])
kdict_writer.writerow({"name":column_value, "amount":numrows})
# some of the data is numeric -- especially the latitude, longitude,
# zipfound, and zipplaced. You might also explore the data
# about, for example, month found/placed numerically (are some months
# likely to have more strays or placements than others?). You could
# even parse the date data and look at for example the impact of
# day of week. The code below shows some ways of visualizing
# latitude and longitude only.
latitude = summary['Latitude']
# need to replace the "EMPTY" key with a numeric value for plotting
latitude[0] = latitude['EMPTY']
del latitude['EMPTY']
latitude = dict((float(k), v) for k, v in latitude.iteritems())
# make a bar plot of all the latitudes we found
plt.bar(latitude.keys(), latitude.values())
plt.show()
# you may want to explore other visualizations
# such as a histogram or other aspects of the data
# including other columns
| apache-2.0 |
MontrealCorpusTools/Montreal-Forced-Aligner | setup.py | 1 | 3632 | import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
from setuptools.command.develop import develop
from setuptools.command.install import install
def readme():
with open('README.md') as f:
return f.read()
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
develop.run(self)
from montreal_forced_aligner.thirdparty.download import download_binaries
download_binaries()
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
install.run(self)
from montreal_forced_aligner.thirdparty.download import download_binaries
download_binaries()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'tests']
self.test_suite = True
def run_tests(self):
if __name__ == '__main__': # Fix for multiprocessing infinite recursion on Windows
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
if __name__ == '__main__':
from montreal_forced_aligner import __version__
setup(name='Montreal Forced Aligner',
version=__version__,
description='Montreal Forced Aligner is a package for aligning speech corpora through the use of '
'acoustic models and dictionaries using Kaldi functionality.',
long_description=readme(),
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing :: Linguistic',
],
keywords='phonology corpus phonetics alignment segmentation',
url='https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner',
author='Montreal Corpus Tools',
author_email='[email protected]',
packages=['montreal_forced_aligner',
'montreal_forced_aligner.aligner',
'montreal_forced_aligner.command_line',
'montreal_forced_aligner.config',
'montreal_forced_aligner.corpus',
'montreal_forced_aligner.features',
'montreal_forced_aligner.g2p',
'montreal_forced_aligner.gui',
'montreal_forced_aligner.lm',
'montreal_forced_aligner.multiprocessing',
'montreal_forced_aligner.thirdparty',
'montreal_forced_aligner.trainers'],
install_requires=[
'praatio',
'numpy',
'tqdm',
'pyyaml',
'librosa',
'pyqt5',
'pyqtgraph',
'requests',
'sklearn',
'joblib'
],
python_requires='>=3.8',
entry_points={
'console_scripts': ['mfa=montreal_forced_aligner.command_line.mfa:main']
},
package_data={'montreal_forced_aligner.config': ['*.yaml']},
cmdclass={
'test': PyTest,
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
extras_require={
'testing': ['pytest'],
}
)
| mit |
Newmu/stylize | example.py | 2 | 2048 | import sys
import os
from matplotlib import pyplot as plt
from scipy.misc import imread,imsave
from stylize import render
def show_img(img,title):
plt.clf()
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title(title)
plt.show()
if __name__ == "__main__":
try:
path = sys.argv[1]
except:
path = 'resources/iggy.jpg'
print "Going to go through a few examples using the stylize.render"
# Load an image into a numpy format and see it
img = imread(path)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title("Our source image, close to continue")
plt.show()
print "Please wait, rendering..."
defaults = render(img,verbose=True)
show_img(defaults,"Default stylization - polygonal")
print "Please wait, rendering..."
landmarks = render(img,features='landmarks',verbose=True)
show_img(landmarks,"Landmark features for curved stylization")
print "Please wait, rendering..."
abstract = render(img,depth=4,verbose=True)
show_img(abstract,"A depth of 4 results in an abstract representation")
print "Please wait, rendering..."
more_detail = render(img,ratio=0.00005,verbose=True)
show_img(more_detail,"Ratio 0.00005 results in greater detail")
print "Please wait, rendering..."
less_detail = render(img,ratio=0.001,verbose=True)
show_img(less_detail,"Ratio 0.001 results in less detail")
print "Please wait, rendering... this one's going to take a minute or so"
smoother = render(img,iterations=25,verbose=True)
show_img(smoother,"Averaging over 25 iterations to make it smoother")
print "Please wait, rendering..."
aa = render(img,anti_aliasing=True,verbose=True)
show_img(aa,"Anti-aliasing to fight jaggies")
print "Saved results are in the examples directory!"
imsave('example_images/defaults.png',defaults)
imsave('example_images/landmarks.png',landmarks)
imsave('example_images/abstract.png',abstract)
imsave('example_images/more_detail.png',more_detail)
imsave('example_images/less_detail.png',less_detail)
imsave('example_images/smoother.png',smoother)
imsave('example_images/aa.png',aa)
| mit |
haochenuw/eigen-mpc | plots/phase2_scatter.py | 1 | 3959 | from matplotlib import rc, pyplot as plt
import numpy as np
import glob
import os
from itertools import cycle
import random
for bw in [32, 64]:
if bw == 64:
dirname = "experiments/results/phase2_accuracy_64_100/"
else:
dirname = "experiments/results/phase2_accuracy_32_100/"
files_cgd = glob.glob(os.path.join(dirname, "*_cgd_*.out"))
# if bw == 32: # only take half of the files
# files_cgd = [files_cgd[i] for i in random.sample(xrange(len(files_cgd)), 100)]
num_iterations = 20
# file x iteration -> (error_dist, error_objective)
data_cgd = np.ndarray(shape=(len(files_cgd), num_iterations, 2))
data_cgd[:] = np.NAN
# file -> condition number
condition_numbers_cgd = np.ndarray(shape=(len(files_cgd)))
condition_numbers_cgd[:] = np.NAN
for i_f, filename in enumerate(files_cgd):
lines = open(filename).read().splitlines()
index_obj = lines.index("Objective function on solution:") + 1
objective_on_solution = lines[index_obj]
condition_numbers_cgd[i_f] = lines[-1]
for iteration in xrange(num_iterations):
# This is to ignore files with wrong format
try:
error_dist, objective = lines[3+iteration].split()[1:3]
error_objective = abs(float(objective) - float(objective_on_solution)) / float(objective_on_solution)
data_cgd[i_f, iteration, 0:2] = error_dist, error_objective
except Exception as e:
print filename
files_cholesky = glob.glob(os.path.join(dirname, "*_cholesky_*.out"))
# file x iteration -> (error_dist, error_objective)
data_cholesky = np.ndarray(shape=(len(files_cholesky)))
data_cholesky[:] = np.NAN
# file -> condition number
condition_numbers_cholesky = np.ndarray(shape=(len(files_cholesky)))
condition_numbers_cholesky[:] = np.NAN
for i_f, filename in enumerate(files_cholesky):
lines = open(filename).read().splitlines()
index_obj = lines.index("Objective function on solution:") + 1
objective_on_solution = lines[index_obj]
condition_numbers_cholesky[i_f] = lines[-1]
index_result = lines.index("result:") + 2
result = np.array(map(float, lines[index_result].split()))
error_dist = lines[1].split()[5]
objective = None # We do not have the objective value on Cholesky instances
#error_objective = abs(float(objective) - float(objective_on_solution))
data_cholesky[i_f] = error_dist
# set pyplot look
rc('text', usetex=True)
rc('text.latex', preamble='\usepackage{lmodern}')
rc('lines', linewidth=1)
rc('patch', linewidth=1)
rc('axes', linewidth=1)
rc('figure', figsize=(4.5, 3))
#rc('errorbar', capsize=5)
rc('savefig', format='pdf')
ax = plt.gca()
symbols = ["s", "*", "d", "v", "."]
colours = ['#404096', '#529DB7', '#7DB874', '#E39C37', '#D92120', '#000000']
#colours = ['#57a3ad', '#dea73a', '#d92120']
colourcycler = cycle(colours)
symbolcycler = cycle(symbols)
#next(colourcycler)
#next(symbolcycler)
iters=[4,9,14,19]
ax.scatter(condition_numbers_cholesky, data_cholesky, c=next(colourcycler), marker=next(symbolcycler), linewidth=0.1)
for iteration in iters:
ax.scatter(condition_numbers_cgd, data_cgd[:,iteration,0], c=next(colourcycler), marker=next(symbolcycler), linewidth=0.1)
ax.set_yscale('log')
#ax.set_xscale('log')
#ax.set_xlim([1,11])
if bw == 64:
ax.set_ylim([1e-18, 1])
else:
ax.set_ylim([1e-6, 100])
plt.legend(["\small Cholesky"] + ["\small CGD {}".format(i+1) for i in iters], loc=3, ncol=3, mode="expand")
#plt.legend(["CGD {}".format(i+1) for i in iters], loc=4)
plt.ylabel(r"Error")
plt.xlabel(r"Condition Number $\kappa$")
plt.tight_layout()
plt.savefig("plot_scatter_2_norm_{}.pdf".format(bw), transparent=True)
plt.show()
| gpl-3.0 |
boundarydevices/android_external_chromium_org | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
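# A hedged illustration (editor's addition): for the example entries above,
# ReadDumpTxtFile below would return
#   {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader', 'rept': 'crash svc'}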
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
rgommers/statsmodels | statsmodels/base/model.py | 1 | 69364 | from __future__ import print_function
from statsmodels.compat.python import iterkeys, lzip, range, reduce
import numpy as np
from scipy import stats
from statsmodels.base.data import handle_data
from statsmodels.tools.tools import recipr, nan_dot
from statsmodels.stats.contrast import ContrastResults
from statsmodels.tools.decorators import resettable_cache, cache_readonly
import statsmodels.base.wrapper as wrap
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.formula import handle_formula_data
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.base.optimizer import Optimizer
_model_params_doc = """
Parameters
----------
endog : array-like
1-d endogenous response variable. The dependent variable.
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See
:func:`statsmodels.tools.add_constant`."""
_missing_param_doc = """\
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none.'"""
_extra_param_doc = """
hasconst : None or bool
Indicates whether the RHS includes a user-supplied constant. If True,
a constant is not checked for and k_constant is set to 1 and all
result statistics are calculated as if a constant is present. If
False, a constant is not checked for and k_constant is set to 0.
"""
class Model(object):
__doc__ = """
A (predictive) statistical model. Intended to be subclassed not used.
%(params_doc)s
%(extra_params_doc)s
Notes
-----
`endog` and `exog` are references to any data provided. So if the data is
already stored in numpy arrays and it is changed then `endog` and `exog`
will change as well.
""" % {'params_doc' : _model_params_doc,
'extra_params_doc' : _missing_param_doc + _extra_param_doc}
def __init__(self, endog, exog=None, **kwargs):
missing = kwargs.pop('missing', 'none')
hasconst = kwargs.pop('hasconst', None)
self.data = self._handle_data(endog, exog, missing, hasconst,
**kwargs)
self.k_constant = self.data.k_constant
self.exog = self.data.exog
self.endog = self.data.endog
self._data_attr = []
self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog',
'data.orig_endog', 'data.orig_exog'])
# store keys for extras if we need to recreate model instance
# we don't need 'missing', maybe we need 'hasconst'
self._init_keys = list(kwargs.keys())
if hasconst is not None:
self._init_keys.append('hasconst')
def _get_init_kwds(self):
"""return dictionary with extra keys used in model.__init__
"""
kwds = dict(((key, getattr(self, key, None))
for key in self._init_keys))
return kwds
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
data = handle_data(endog, exog, missing, hasconst, **kwargs)
# kwargs arrays could have changed, easier to just attach here
for key in kwargs:
# pop so we don't start keeping all these twice or references
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError: # panel already pops keys in data handling
pass
return data
@classmethod
def from_formula(cls, formula, data, subset=None, *args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
subset : array-like
An array-like object of booleans, integers, or index values that
indicate the subset of df to use in the model. Assumes df is a
`pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model.
Returns
-------
model : Model instance
Notes
------
data must define __getitem__ with the keys in the formula terms
args and kwargs are passed on to the model instantiation. E.g.,
a numpy structured or rec array, a dictionary, or a pandas DataFrame.
"""
#TODO: provide a docs template for args/kwargs from child models
#TODO: subset could use syntax. issue #469.
if subset is not None:
data = data.ix[subset]
endog, exog = handle_formula_data(data, None, formula)
mod = cls(endog, exog, *args, **kwargs)
mod.formula = formula
# since we got a dataframe, attach the original
mod.data.frame = data
return mod
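# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of the from_formula pattern documented above, using
# a concrete subclass; the DataFrame and column names are hypothetical.
#
#   import pandas as pd
#   import statsmodels.api as sm
#   df = pd.DataFrame({"y": [1.0, 2.0, 3.0, 4.0], "x1": [0.1, 0.4, 0.5, 0.9]})
#   mod = sm.OLS.from_formula("y ~ x1", data=df)
#   res = mod.fit()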
@property
def endog_names(self):
return self.data.ynames
@property
def exog_names(self):
return self.data.xnames
def fit(self):
"""
Fit a model to data.
"""
raise NotImplementedError
def predict(self, params, exog=None, *args, **kwargs):
"""
After a model has been fit predict returns the fitted values.
This is a placeholder intended to be overwritten by individual models.
"""
raise NotImplementedError
class LikelihoodModel(Model):
"""
Likelihood model is a subclass of Model.
"""
def __init__(self, endog, exog=None, **kwargs):
super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
self.initialize()
def initialize(self):
"""
Initialize (possibly re-initialize) a Model instance. For
instance, the design matrix of a linear model may change
and some things must be recomputed.
"""
pass
# TODO: if the intent is to re-initialize the model with new data then this
# method needs to take inputs...
def loglike(self, params):
"""
Log-likelihood of model.
"""
raise NotImplementedError
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
"""
raise NotImplementedError
def information(self, params):
"""
Fisher information matrix of model
Returns -Hessian of loglike evaluated at params.
"""
raise NotImplementedError
def hessian(self, params):
"""
The Hessian matrix of the model
"""
raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=100,
full_output=True, disp=True, fargs=(), callback=None,
retall=False, **kwargs):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
fargs : tuple, optional
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for solvers (see returned Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
This many terms are used for the Hessian approximation.
factr : float
A stop condition that is a variant of relative error.
pgtol : float
A stop condition that uses the projected gradient.
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
maxfun : int
Maximum number of function evaluations to make.
bounds : sequence
(min, max) pairs for each element in x,
defining the bounds on that parameter.
Use None for one of min or max when there is no bound
in that direction.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) for acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
Hinv = None # JP error if full_output=0, Hinv not defined
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else:
raise ValueError("If exog is None, then start_params should "
"be specified")
# TODO: separate args from non-arg-taking score and hessian, i.e.,
# user-supplied versus numerically evaluated; the estimated fprime doesn't take
# args in most (any?) of the optimize functions
nobs = self.endog.shape[0]
f = lambda params, *args: -self.loglike(params, *args) / nobs
score = lambda params: -self.score(params) / nobs
try:
hess = lambda params: -self.hessian(params) / nobs
except:
hess = None
if method == 'newton':
score = lambda params: self.score(params) / nobs
hess = lambda params: self.hessian(params) / nobs
#TODO: why are score and hess positive?
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
fargs, kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output)
#NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault('cov_params_func', None)
if not full_output: # xopt should be None and retvals is argmin
xopt = retvals
elif cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == 'newton' and full_output:
Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
else:
try:
Hinv = np.linalg.inv(-1 * self.hessian(xopt))
except:
#might want custom warning ResultsWarning? NumericalWarning?
from warnings import warn
warndoc = ('Inverting hessian failed, no bse or '
'cov_params available')
warn(warndoc, RuntimeWarning)
Hinv = None
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
else:
kwds = {}
if 'use_t' in kwargs:
kwds['use_t'] = kwargs['use_t']
#prints for debugging
#print('kwargs inLikelihoodModel.fit', kwargs)
#print('kwds inLikelihoodModel.fit', kwds)
#TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)
#TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
mlefit.mle_settings = optim_settings
return mlefit
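# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A hedged example of choosing a solver through the `method` argument documented
# in `fit` above; Logit on the spector dataset is just one concrete choice.
#
#   import statsmodels.api as sm
#   data = sm.datasets.spector.load()
#   data.exog = sm.add_constant(data.exog)
#   mod = sm.Logit(data.endog, data.exog)
#   res_newton = mod.fit(method="newton", maxiter=100, disp=False)
#   res_bfgs = mod.fit(method="bfgs", maxiter=100, disp=False)
#   # the two parameter vectors should agree to several decimal places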
#TODO: the below is unfinished
class GenericLikelihoodModel(LikelihoodModel):
"""
Allows the fitting of any likelihood function via maximum likelihood.
A subclass needs to specify at least the log-likelihood
If the log-likelihood is specified for each observation, then results that
require the Jacobian will be available. (The other case is not tested yet.)
Notes
-----
Optimization methods that require only a likelihood function are 'nm' and
'powell'
Optimization methods that require a likelihood function and a
score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
Hessian is optional for 'ncg'.
The optimization method that requires a likelihood function, a score/gradient,
and a Hessian is 'newton'
If they are not overwritten by a subclass, then numerical gradient,
Jacobian and Hessian of the log-likelihood are calculated by numerical
forward differentiation. This might result in some cases in precision
problems, and the Hessian might not be positive definite. Even if the
Hessian is not positive definite the covariance matrix of the parameter
estimates based on the outer product of the Jacobian might still be valid.
Examples
--------
see also subclasses in directory miscmodels
import statsmodels.api as sm
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog)
# in this dir
from model import GenericLikelihoodModel
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
import numpy as np
np.allclose(res.params, probit_res.params)
"""
def __init__(self, endog, exog=None, loglike=None, score=None,
hessian=None, missing='none', extra_params_names=None,
**kwds):
# let them be none in case user wants to use inheritance
if not loglike is None:
self.loglike = loglike
if not score is None:
self.score = score
if not hessian is None:
self.hessian = hessian
self.__dict__.update(kwds)
# TODO: data structures?
#TODO temporary solution, force approx normal
#self.df_model = 9999
#somewhere: CacheWriteWarning: 'df_model' cannot be overwritten
super(GenericLikelihoodModel, self).__init__(endog, exog,
missing=missing)
# this won't work for ru2nmnl, maybe np.ndim of a dict?
if exog is not None:
#try:
self.nparams = (exog.shape[1] if np.ndim(exog) == 2 else 1)
if extra_params_names is not None:
self._set_extra_params_names(extra_params_names)
def _set_extra_params_names(self, extra_params_names):
# check param_names
if extra_params_names is not None:
if self.exog is not None:
self.exog_names.extend(extra_params_names)
else:
self.data.xnames = extra_params_names
self.nparams = len(self.exog_names)
#this is redundant and not used when subclassing
def initialize(self):
if not self.score: # right now score is not optional
self.score = approx_fprime
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
#Initialize is called by
#statsmodels.model.LikelihoodModel.__init__
#and should contain any preprocessing that needs to be done for a model
from statsmodels.tools import tools
if self.exog is not None:
# assume constant
self.df_model = float(np_matrix_rank(self.exog) - 1)
self.df_resid = (float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
else:
self.df_model = np.nan
self.df_resid = np.nan
super(GenericLikelihoodModel, self).initialize()
def expandparams(self, params):
'''
expand to full parameter array when some parameters are fixed
Parameters
----------
params : array
reduced parameter array
Returns
-------
paramsfull : array
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation.
'''
paramsfull = self.fixed_params.copy()
paramsfull[self.fixed_paramsmask] = params
return paramsfull
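# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A hedged, self-contained illustration of the expandparams/reduceparams
# semantics above; the attribute values are hypothetical.
#
#   import numpy as np
#   fixed_params = np.array([0.0, 1.5, 0.0])           # template incl. fixed values
#   fixed_paramsmask = np.array([True, False, True])    # True marks free parameters
#   free = np.array([0.3, -0.2])
#   full = fixed_params.copy()
#   full[fixed_paramsmask] = free                        # -> array([ 0.3,  1.5, -0.2])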
def reduceparams(self, params):
return params[self.fixed_paramsmask]
def loglike(self, params):
return self.loglikeobs(params).sum(0)
def nloglike(self, params):
return -self.loglikeobs(params).sum(0)
def loglikeobs(self, params):
return -self.nloglikeobs(params)
def score(self, params):
'''
Gradient of log-likelihood evaluated at params
'''
kwds = {}
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglike, **kwds).ravel()
def jac(self, params, **kwds):
'''
Jacobian/Gradient of log-likelihood evaluated at params for each
observation.
'''
#kwds.setdefault('epsilon', 1e-4)
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglikeobs, **kwds)
def hessian(self, params):
'''
Hessian of log-likelihood evaluated at params
'''
from statsmodels.tools.numdiff import approx_hess
# need options for hess (epsilon)
return approx_hess(params, self.loglike)
def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
disp=1, callback=None, retall=0, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.LikelihoodModel.fit
"""
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
else:
start_params = 0.1 * np.ones(self.nparams)
fit_method = super(GenericLikelihoodModel, self).fit
mlefit = fit_method(start_params=start_params,
method=method, maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback, **kwargs)
genericmlefit = GenericLikelihoodModelResults(self, mlefit)
#amend param names
exog_names = [] if (self.exog_names is None) else self.exog_names
k_miss = len(exog_names) - len(mlefit.params)
if not k_miss == 0:
if k_miss < 0:
self._set_extra_params_names(
['par%d' % i for i in range(-k_miss)])
else:
# I don't want to raise after we have already fit()
import warnings
warnings.warn('more exog_names than parameters', UserWarning)
return genericmlefit
#fit.__doc__ += LikelihoodModel.fit.__doc__
class Results(object):
"""
Class to contain model results
Parameters
----------
model : class instance
the previously specified model instance
params : array
parameter estimates from the fit model
"""
def __init__(self, model, params, **kwd):
self.__dict__.update(kwd)
self.initialize(model, params, **kwd)
self._data_attr = []
def initialize(self, model, params, **kwd):
self.params = params
self.model = model
if hasattr(model, 'k_constant'):
self.k_constant = model.k_constant
def predict(self, exog=None, transform=True, *args, **kwargs):
"""
Call self.model.predict with self.params as the first argument.
Parameters
----------
exog : array-like, optional
The values for which you want to predict.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray or pandas.Series
See self.model.predict
"""
if transform and hasattr(self.model, 'formula') and exog is not None:
from patsy import dmatrix
exog = dmatrix(self.model.data.orig_exog.design_info.builder,
exog)
if exog is not None:
exog = np.asarray(exog)
if exog.ndim == 1 and (self.model.exog.ndim == 1 or
self.model.exog.shape[1] == 1):
exog = exog[:, None]
exog = np.atleast_2d(exog) # needed in count model shape[1]
return self.model.predict(self.params, exog, *args, **kwargs)
#TODO: public method?
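# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A hedged example of Results.predict with a formula-based model, where new data
# is passed through the formula because transform=True; names are hypothetical.
#
#   import pandas as pd
#   import statsmodels.api as sm
#   df = pd.DataFrame({"y": [1.0, 2.0, 3.0, 4.0], "x1": [1.0, 2.0, 3.0, 4.0]})
#   res = sm.OLS.from_formula("y ~ x1", data=df).fit()
#   new = pd.DataFrame({"x1": [5.0, 6.0]})
#   yhat = res.predict(new)   # the formula's design is applied to `new`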
class LikelihoodModelResults(Results):
"""
Class to contain results from likelihood models
Parameters
-----------
model : LikelihoodModel instance or subclass instance
LikelihoodModelResults holds a reference to the model that is fit.
params : 1d array_like
parameter estimates from estimated model
normalized_cov_params : 2d array
Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
scale : float
For (some subset of models) scale will typically be the
mean square error from the estimated model (sigma^2)
Returns
-------
**Attributes**
mle_retvals : dict
Contains the values returned from the chosen optimization method if
full_output is True during the fit. Available only if the model
is fit by maximum likelihood. See notes below for the output from
the different methods.
mle_settings : dict
Contains the arguments passed to the chosen optimization method.
Available if the model is fit by maximum likelihood. See
LikelihoodModel.fit for more information.
model : model instance
LikelihoodResults contains a reference to the model that is fit.
params : ndarray
The parameters estimated for the model.
scale : float
The scaling factor of the model given during instantiation.
tvalues : array
The t-values of the standard errors.
Notes
-----
The covariance of params is given by scale times normalized_cov_params.
Return values by solver if full_output is True during fit:
'newton'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
score : ndarray
The score vector at the optimum.
Hessian : ndarray
The Hessian at the optimum.
warnflag : int
1 if maxiter is exceeded. 0 if successful convergence.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'nm'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
warnflag : int
1: Maximum number of function evaluations made.
2: Maximum number of iterations reached.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'bfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
Hinv : ndarray
value of the inverse Hessian matrix at minimum. Note
that this is just an approximation and will often be
different from the value of the analytic Hessian.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient
and/or function calls are not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'lbfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
fcalls : int
Number of calls to loglike.
warnflag : int
Warning flag:
- 0 if converged
- 1 if too many function evaluations or too many iterations
- 2 if stopped for another reason
converged : bool
True: converged. False: did not converge.
'powell'
fopt : float
Value of the (negative) loglikelihood at its minimum.
direc : ndarray
Current direction set.
iterations : int
Number of iterations performed.
fcalls : int
Number of calls to loglike.
warnflag : int
1: Maximum number of function evaluations. 2: Maximum number
of iterations.
converged : bool
True : converged. False: did not converge.
allvecs : list
Results at each iteration.
'cg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient and/
or function calls not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'ncg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
hcalls : int
Number of calls to hessian.
warnflag : int
1: Maximum number of iterations exceeded.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
"""
# by default we use normal distribution
# can be overwritten by instances or subclasses
use_t = False
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
**kwargs):
super(LikelihoodModelResults, self).__init__(model, params)
self.normalized_cov_params = normalized_cov_params
self.scale = scale
# robust covariance
# We put cov_type in kwargs so subclasses can decide in fit whether to
# use this generic implementation
if 'use_t' in kwargs:
use_t = kwargs['use_t']
if use_t is not None:
self.use_t = use_t
if 'cov_type' in kwargs:
cov_type = kwargs.get('cov_type', 'nonrobust')
cov_kwds = kwargs.get('cov_kwds', {})
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
from statsmodels.base.covtype import get_robustcov_results
if cov_kwds is None:
cov_kwds = {}
use_t = self.use_t
# TODO: we shouldn't need use_t in get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
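# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A hedged example of inspecting the optimizer output documented above; assumes
# `res` came from a maximum-likelihood `fit` call with full_output=True.
#
#   res.mle_retvals.get("converged")    # True/False for most solvers
#   res.mle_settings["optimizer"]       # e.g. 'newton', 'bfgs', ...
#   res.bse                             # standard errors from cov_params()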
def normalized_cov_params(self):
raise NotImplementedError
def _get_robustcov_results(self, cov_type='nonrobust', use_self=True,
use_t=None, **cov_kwds):
from statsmodels.base.covtype import get_robustcov_results
if cov_kwds is None:
cov_kwds = {}
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
# TODO: we shouldn't need use_t in get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def tvalues(self):
"""
Return the t-statistic for a given parameter estimate.
"""
return self.params / self.bse
@cache_readonly
def pvalues(self):
if self.use_t:
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
return stats.t.sf(np.abs(self.tvalues), df_resid)*2
else:
return stats.norm.sf(np.abs(self.tvalues))*2
def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
other=None):
"""
Returns the variance/covariance matrix.
The variance/covariance matrix can be of a linear contrast
of the estimates of params or all params multiplied by scale which
will usually be an estimate of sigma^2. Scale is assumed to be
a scalar.
Parameters
----------
r_matrix : array-like
Can be 1d, or 2d. Can be used alone or with other.
column : array-like, optional
            Must be used on its own. Can be 0d or 1d; see below.
scale : float, optional
Can be specified or not. Default is None, which means that
the scale argument is taken from the model.
other : array-like, optional
Can be used when r_matrix is specified.
Returns
-------
cov : ndarray
covariance matrix of the parameter estimates or of linear
combination of parameter estimates. See Notes.
Notes
-----
(The below are assumed to be in matrix notation.)
If no argument is specified returns the covariance matrix of a model
``(scale)*(X.T X)^(-1)``
        If contrast is specified it pre- and post-multiplies as follows
``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``
If contrast and other are specified returns
``(scale) * r_matrix (X.T X)^(-1) other.T``
If column is specified returns
``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d
OR
``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
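        Examples
        --------
        A minimal, hedged sketch (``results`` is assumed to be a fitted
        results instance such as the OLS example shown under ``t_test``):
        >>> results.cov_params()              # full covariance matrix
        >>> results.cov_params(column=1)      # variance of params[1]
        >>> r = np.zeros_like(results.params)
        >>> r[-2:] = [1, -1]
        >>> results.cov_params(r_matrix=r)    # variance of the contrast r'b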
"""
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
dot_fun = nan_dot
else:
dot_fun = np.dot
if (cov_p is None and self.normalized_cov_params is None and
not hasattr(self, 'cov_params_default')):
raise ValueError('need covariance of parameters for computing '
'(unnormalized) covariances')
if column is not None and (r_matrix is not None or other is not None):
raise ValueError('Column should be specified without other '
'arguments.')
if other is not None and r_matrix is None:
raise ValueError('other can only be specified with r_matrix')
if cov_p is None:
if hasattr(self, 'cov_params_default'):
cov_p = self.cov_params_default
else:
if scale is None:
scale = self.scale
cov_p = self.normalized_cov_params * scale
if column is not None:
column = np.asarray(column)
if column.shape == ():
return cov_p[column, column]
else:
#return cov_p[column][:, column]
return cov_p[column[:, None], column]
elif r_matrix is not None:
r_matrix = np.asarray(r_matrix)
if r_matrix.shape == ():
raise ValueError("r_matrix should be 1d or 2d")
if other is None:
other = r_matrix
else:
other = np.asarray(other)
tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
return tmp
else: # if r_matrix is None and column is None:
return cov_p
#TODO: make sure this works as needed for GLMs
def t_test(self, r_matrix, q_matrix=None, cov_p=None, scale=None,
use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q.
Parameters
----------
r_matrix : array-like, str, tuple
- array : If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like or scalar, optional
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> r = np.zeros_like(results.params)
>>> r[5:] = [1,-1]
>>> print(r)
[ 0. 0. 0. 0. 0. 1. -1.]
        r tests that the coefficients on the 5th and 6th independent
        variables are the same.
>>> T_test = results.t_test(r)
>>> print(T_test)
<T contrast: effect=-1829.2025687192481, sd=455.39079425193762,
t=-4.0167754636411717, p=0.0015163772380899498, df_denom=9>
>>> T_test.effect
-1829.2025687192481
>>> T_test.sd
455.39079425193762
>>> T_test.tvalue
-4.0167754636411717
>>> T_test.pvalue
0.0015163772380899498
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.formula.api import ols
>>> dta = sm.datasets.longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
>>> t_test = results.t_test(hypotheses)
>>> print(t_test)
See Also
---------
tvalues : individual t statistics
f_test : for F tests
patsy.DesignInfo.linear_constraint
"""
from patsy import DesignInfo
if q_matrix is not None:
from warnings import warn
warn("The `q_matrix` keyword is deprecated and will be removed "
"in 0.6.0. See the documentation for the new API",
FutureWarning)
r_matrix = (r_matrix, q_matrix)
LC = DesignInfo(self.model.exog_names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
num_ttests = r_matrix.shape[0]
num_params = r_matrix.shape[1]
if cov_p is None and self.normalized_cov_params is None:
raise ValueError('Need covariance of parameters for computing '
'T statistics')
if num_params != self.params.shape[0]:
raise ValueError('r_matrix and params are not aligned')
if q_matrix is None:
q_matrix = np.zeros(num_ttests)
else:
q_matrix = np.asarray(q_matrix)
q_matrix = q_matrix.squeeze()
if q_matrix.size > 1:
if q_matrix.shape[0] != num_ttests:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
if use_t is None:
            # default use_t to False if it is not defined on the results instance
use_t = (hasattr(self, 'use_t') and self.use_t)
_t = _sd = None
_effect = np.dot(r_matrix, self.params)
# nan_dot multiplies with the convention nan * 0 = 0
# Perform the test
if num_ttests > 1:
_sd = np.sqrt(np.diag(self.cov_params(
r_matrix=r_matrix, cov_p=cov_p)))
else:
_sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
_t = (_effect - q_matrix) * recipr(_sd)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_t:
return ContrastResults(effect=_effect, t=_t, sd=_sd,
df_denom=df_resid)
else:
return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
df_denom=df_resid,
distribution='norm')
def f_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
invcov=None):
"""
Compute the F-test for a joint linear hypothesis.
This is a special case of `wald_test` that always uses the F
distribution.
Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
Default is 1.0 for no scaling.
invcov : array-like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]
        This tests that the coefficients are jointly statistically
        significantly different from zero.
>>> print(results.f_test(A))
<F contrast: F=330.28533923463488, p=4.98403052872e-10,
df_denom=9, df_num=6>
Compare this to
>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10
>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))
        This tests that the coefficients on the 2nd and 3rd regressors are
        equal, and jointly that the coefficients on the 5th and 6th regressors
        are equal.
>>> print(results.f_test(B))
<F contrast: F=9.740461873303655, p=0.00560528853174, df_denom=9,
df_num=2>
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
See Also
--------
statsmodels.stats.contrast.ContrastResults
wald_test
t_test
patsy.DesignInfo.linear_constraint
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
res = self.wald_test(r_matrix, q_matrix=q_matrix, cov_p=cov_p,
scale=scale, invcov=invcov, use_f=True)
return res
#TODO: untested for GLMs?
def wald_test(self, r_matrix, q_matrix=None, cov_p=None, scale=1.0,
invcov=None, use_f=None):
"""
Compute a Wald-test for a joint linear hypothesis.
Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), since q_matrix is
deprecated.
q_matrix : array-like
This is deprecated. See `r_matrix` and the examples for more
information on new usage. Can be either a scalar or a length p
row vector. If omitted and r_matrix is an array, `q_matrix` is
assumed to be a conformable array of zeros.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
Default is 1.0 for no scaling.
invcov : array-like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
use_f : bool
            If True, then the F-distribution is used. If False, then the
            asymptotic chi-square distribution is used. If use_f is None, then
            the F distribution is used if the model specifies that use_t is
            True. The test statistic is proportionally adjusted for the
            distribution by the number of constraints in the hypothesis.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
See also
--------
statsmodels.stats.contrast.ContrastResults
f_test
t_test
patsy.DesignInfo.linear_constraint
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
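        Examples
        --------
        A minimal, hedged sketch (reusing the formula-based ``results`` from
        the ``t_test``/``f_test`` examples; the hypothesis string follows the
        same patsy constraint syntax):
        >>> wald = results.wald_test('GNPDEFL = GNP, UNEMP = 2')
        >>> print(wald)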
"""
if use_f is None:
            # use the F distribution only if the model itself uses the t distribution
use_f = (hasattr(self, 'use_t') and self.use_t)
from patsy import DesignInfo
if q_matrix is not None:
from warnings import warn
warn("The `q_matrix` keyword is deprecated and will be removed "
"in 0.6.0. See the documentation for the new API",
FutureWarning)
r_matrix = (r_matrix, q_matrix)
LC = DesignInfo(self.model.exog_names).linear_constraint(r_matrix)
r_matrix, q_matrix = LC.coefs, LC.constants
if (self.normalized_cov_params is None and cov_p is None and
invcov is None):
raise ValueError('need covariance of parameters for computing '
'F statistics')
cparams = np.dot(r_matrix, self.params[:, None])
J = float(r_matrix.shape[0]) # number of restrictions
if q_matrix is None:
q_matrix = np.zeros(J)
else:
q_matrix = np.asarray(q_matrix)
if q_matrix.ndim == 1:
q_matrix = q_matrix[:, None]
if q_matrix.shape[0] != J:
raise ValueError("r_matrix and q_matrix must have the same "
"number of rows")
Rbq = cparams - q_matrix
if invcov is None:
cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
if np.isnan(cov_p).max():
raise ValueError("r_matrix performs f_test for using "
"dimensions that are asymptotically "
"non-normal")
invcov = np.linalg.inv(cov_p)
if (hasattr(self, 'mle_settings') and
self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
else:
F = np.dot(np.dot(Rbq.T, invcov), Rbq)
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
if use_f:
F /= J
return ContrastResults(F=F, df_denom=df_resid,
df_num=invcov.shape[0])
else:
return ContrastResults(chi2=F, df_denom=J, statistic=F,
distribution='chi2', distargs=(J,))
def conf_int(self, alpha=.05, cols=None, method='default'):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
            i.e., the default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
method : string
Not Implemented Yet
Method to estimate the confidence_interval.
"Default" : uses self.bse which is based on inverse Hessian for MLE
"hjjh" :
"jac" :
"boot-bse"
"boot_quant"
"profile"
Returns
--------
conf_int : array
Each row contains [lower, upper] limits of the confidence interval
for the corresponding parameter. The first column contains all
lower, the second column contains all upper limits.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
[ -177.02903529, 207.15277984],
[ -0.1115811 , 0.03994274],
[ -3.12506664, -0.91539297],
[ -1.5179487 , -0.54850503],
[ -0.56251721, 0.460309 ],
[ 798.7875153 , 2859.51541392]])
>>> results.conf_int(cols=(2,3))
array([[-0.1115811 , 0.03994274],
[-3.12506664, -0.91539297]])
Notes
-----
        The confidence interval is based on Student's t distribution when
        ``use_t`` is True, and on the standard normal distribution otherwise.
        Models that wish to use a different distribution should override this
        method.
"""
bse = self.bse
if self.use_t:
dist = stats.t
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
q = dist.ppf(1 - alpha / 2, df_resid)
else:
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = self.params[cols] - q * bse[cols]
upper = self.params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def save(self, fname, remove_data=False):
'''
save a pickle of this instance
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
Notes
-----
If remove_data is true and the model result does not implement a
remove_data method then this will raise an exception.
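        Examples
        --------
        A hedged sketch (assumes ``results`` is any fitted results instance
        and that 'results.pkl' is a writable path):
        >>> results.save('results.pkl', remove_data=True)
        >>> restored = results.__class__.load('results.pkl')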
'''
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname)
@classmethod
def load(cls, fname):
'''
load a pickle, (class method)
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
Returns
-------
unpickled instance
'''
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname)
def remove_data(self):
'''remove data arrays, all nobs arrays from result and model
This reduces the size of the instance, so it can be pickled with less
memory. Currently tested for use with predict from an unpickled
results and model instance.
        .. warning:: Since data and some intermediate results have been
           removed, calculating new statistics that require them will raise
           exceptions.
The exception will occur the first time an attribute is accessed
that has been set to None.
        Not fully tested for time series models (tsa); it might delete too
        much for prediction, or not everything that could be deleted.
The list of arrays to delete is maintained as an attribute of the
result and model instance, except for cached values. These lists could
be changed before calling remove_data.
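        A hedged usage sketch (``results`` is assumed to be a fitted results
        instance; pickling a smaller object is the typical motivation):
        >>> results.remove_data()
        >>> results.save('results_small.pkl')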
'''
def wipe(obj, att):
#get to last element in attribute path
p = att.split('.')
att_ = p.pop(-1)
try:
obj_ = reduce(getattr, [obj] + p)
#print(repr(obj), repr(att))
#print(hasattr(obj_, att_))
if hasattr(obj_, att_):
#print('removing3', att_)
setattr(obj_, att_, None)
except AttributeError:
pass
model_attr = ['model.' + i for i in self.model._data_attr]
for att in self._data_attr + model_attr:
#print('removing', att)
wipe(self, att)
data_in_cache = getattr(self, 'data_in_cache', [])
data_in_cache += ['fittedvalues', 'resid', 'wresid']
for key in data_in_cache:
try:
self._cache[key] = None
except (AttributeError, KeyError):
pass
class LikelihoodResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
'bse': 'columns',
'pvalues': 'columns',
'tvalues': 'columns',
'resid': 'rows',
'fittedvalues': 'rows',
'normalized_cov_params': 'cov',
}
_wrap_attrs = _attrs
_wrap_methods = {
'cov_params': 'cov',
'conf_int': 'columns'
}
wrap.populate_wrapper(LikelihoodResultsWrapper,
LikelihoodModelResults)
class ResultMixin(object):
@cache_readonly
def df_modelwc(self):
# collect different ways of defining the number of parameters, used for
# aic, bic
if hasattr(self, 'df_model'):
if hasattr(self, 'hasconst'):
hasconst = self.hasconst
else:
# default assumption
hasconst = 1
return self.df_model + hasconst
else:
return self.params.size
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * (self.df_modelwc)
@cache_readonly
def bic(self):
return -2 * self.llf + np.log(self.nobs) * (self.df_modelwc)
@cache_readonly
def jacv(self):
'''cached Jacobian of log-likelihood
'''
return self.model.jac(self.params)
@cache_readonly
def hessv(self):
'''cached Hessian of log-likelihood
'''
return self.model.hessian(self.params)
@cache_readonly
def covjac(self):
'''
covariance of parameters based on outer product of jacobian of
log-likelihood
'''
## if not hasattr(self, '_results'):
## raise ValueError('need to call fit first')
## #self.fit()
## self.jacv = jacv = self.jac(self._results.params)
jacv = self.jacv
return np.linalg.inv(np.dot(jacv.T, jacv))
@cache_readonly
def covjhj(self):
        '''covariance of parameters based on HJJH (sandwich form):
        cov_params (inverse-Hessian based) times the outer product of the
        scores (Jacobian) times cov_params again; name should be covhjh
'''
jacv = self.jacv
## hessv = self.hessv
## hessinv = np.linalg.inv(hessv)
## self.hessinv = hessinv
hessinv = self.cov_params()
return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv))
@cache_readonly
def bsejhj(self):
        '''standard deviation of parameter estimates based on covjhj
'''
return np.sqrt(np.diag(self.covjhj))
@cache_readonly
def bsejac(self):
'''standard deviation of parameter estimates based on covjac
'''
return np.sqrt(np.diag(self.covjac))
def bootstrap(self, nrep=100, method='nm', disp=0, store=1):
"""simple bootstrap to get mean and variance of estimator
see notes
Parameters
----------
nrep : int
number of bootstrap replications
method : str
optimization method to use
disp : bool
If true, then optimization prints results
store : bool
If true, then parameter estimates for all bootstrap iterations
are attached in self.bootstrap_results
Returns
-------
mean : array
mean of parameter estimates over bootstrap replications
std : array
standard deviation of parameter estimates over bootstrap
replications
Notes
-----
This was mainly written to compare estimators of the standard errors of
the parameter estimates. It uses independent random sampling from the
original endog and exog, and therefore is only correct if observations
are independently distributed.
This will be moved to apply only to models with independently
distributed observations.
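        Examples
        --------
        A hedged sketch (``res`` is assumed to be a results instance whose
        model can be re-created from ``endog``/``exog`` alone, e.g. a
        GenericLikelihoodModel subclass with independent observations):
        >>> mean, std, samples = res.bootstrap(nrep=50, method='nm', disp=0)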
"""
results = []
print(self.model.__class__)
hascloneattr = True if hasattr(self, 'cloneattr') else False
for i in range(nrep):
            rvsind = np.random.randint(self.nobs, size=self.nobs)
#this needs to set startparam and get other defining attributes
#need a clone method on model
fitmod = self.model.__class__(self.endog[rvsind],
self.exog[rvsind, :])
if hascloneattr:
for attr in self.model.cloneattr:
setattr(fitmod, attr, getattr(self.model, attr))
fitres = fitmod.fit(method=method, disp=disp)
results.append(fitres.params)
results = np.array(results)
if store:
self.bootstrap_results = results
return results.mean(0), results.std(0), results
def get_nlfun(self, fun):
#I think this is supposed to get the delta method that is currently
#in miscmodels count (as part of Poisson example)
pass
class GenericLikelihoodModelResults(LikelihoodModelResults, ResultMixin):
"""
    A results class for models estimated with ``GenericLikelihoodModel``.
    .. warning::
       The following description has not been updated for this
       version/class; it looks like a copy from the discrete models
       (where are AIC, BIC, ...?).
Parameters
----------
model : A DiscreteModel instance
mlefit : instance of LikelihoodResults
This contains the numerical optimization results as returned by
        LikelihoodModel.fit(), a superclass of GenericLikelihoodModel
Returns
-------
*Attributes*
Warning most of these are not available yet
aic : float
Akaike information criterion. -2*(`llf` - p) where p is the number
of regressors including the intercept.
bic : float
Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
number of regressors including the intercept.
bse : array
The standard errors of the coefficients.
df_resid : float
See model definition.
df_model : float
See model definition.
fitted_values : array
Linear predictor XB.
llf : float
Value of the loglikelihood
llnull : float
Value of the constant-only loglikelihood
llr : float
Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
llr_pvalue : float
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
prsquared : float
McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
"""
def __init__(self, model, mlefit):
self.model = model
self.endog = model.endog
self.exog = model.exog
self.nobs = model.endog.shape[0]
# TODO: possibly move to model.fit()
# and outsource together with patching names
if hasattr(model, 'df_model'):
self.df_model = model.df_model
else:
self.df_model = len(mlefit.params)
# retrofitting the model, used in t_test TODO: check design
self.model.df_model = self.df_model
if hasattr(model, 'df_resid'):
self.df_resid = model.df_resid
else:
self.df_resid = self.endog.shape[0] - self.df_model
# retrofitting the model, used in t_test TODO: check design
self.model.df_resid = self.df_resid
self._cache = resettable_cache()
self.__dict__.update(mlefit.__dict__)
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
-----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##` for ## in 0, ..., p-1, with p the number of regressors
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
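        Examples
        --------
        A hedged sketch (``res`` is assumed to be a fitted
        GenericLikelihoodModelResults instance):
        >>> smry = res.summary(yname='y', alpha=0.05)
        >>> print(smry)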
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Maximum Likelihood']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), # [self.df_resid]),
('Df Model:', None), # [self.df_model])
]
top_right = [ # ('R-squared:', ["%#8.3f" % self.rsquared]),
# ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
# ('F-statistic:', ["%#8.4g" % self.fvalue] ),
# ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None), # ["%#6.4g" % self.llf]),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=False)
return smry
| bsd-3-clause |
Unidata/MetPy | dev/_downloads/de92980ba106a56ee23a09ae59eaeb1d/GINI_Water_Vapor.py | 12 | 1765 | # Copyright (c) 2015,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
GINI Water Vapor Imagery
========================
Use MetPy's support for GINI files to read in a water vapor satellite image and plot the
data using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.plots import add_metpy_logo, add_timestamp, colortables
###########################################
# Open the GINI file from the test data
f = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini'))
print(f)
###########################################
# Get a Dataset view of the data (essentially a NetCDF-like interface to the
# underlying data). Pull out the data and (x, y) coordinates. We use `metpy.parse_cf` to
# handle parsing some netCDF Climate and Forecasting (CF) metadata to simplify working with
# projections.
ds = xr.open_dataset(f)
x = ds.variables['x'][:]
y = ds.variables['y'][:]
dat = ds.metpy.parse_cf('WV')
###########################################
# Plot the image. We use MetPy's xarray/cartopy integration to automatically handle parsing
# the projection information.
fig = plt.figure(figsize=(10, 12))
add_metpy_logo(fig, 125, 145)
ax = fig.add_subplot(1, 1, 1, projection=dat.metpy.cartopy_crs)
wv_norm, wv_cmap = colortables.get_with_range('WVCIMSS', 100, 260)
wv_cmap.set_under('k')
im = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm,
extent=(x.min(), x.max(), y.min(), y.max()), origin='upper')
ax.add_feature(cfeature.COASTLINE.with_scale('50m'))
add_timestamp(ax, f.prod_desc.datetime, y=0.02, high_contrast=True)
plt.show()
| bsd-3-clause |
thilbern/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
    Implements Lee & Seung's multiplicative-update algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
    R : None or "svd", optional
        initialization: None (default) draws W and H at random from
        numpy's global random state; "svd" uses an NNDSVD-based start
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
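    Examples
    --------
    A hedged sketch (shapes and rank are illustrative only):
    >>> rng = np.random.RandomState(0)
    >>> V = np.abs(rng.standard_normal((50, 30)))
    >>> W, H = alt_nnmf(V, r=5, tol=1e-3)
    >>> W.shape, H.shape
    ((50, 5), (5, 30))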
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 46 | 32817 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
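  # Deterministically splits (features, labels) in half: first half for
  # training, second half for testing.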
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)], labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):], labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
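  # Builds an input_fn closure that returns the given features (and labels,
  # when provided) as constant tensors.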
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(input_fn=_input_fn_builder(test_features,
None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| mit |
fdion/stemgraphic | stemgraphic/text.py | 1 | 14774 | import math
import numpy as np
import pandas as pd
from operator import itemgetter
from warnings import warn
from .helpers import *
def quantize(df, column=None, display=750, leaf_order=1, random_state=None, scale=None, trim=None, zoom=None):
""" quantize
Converts a series into stem-and-leaf and back into decimal. This has the potential effect of decimating (or
truncating) values in a lossy way.
:param df: list, numpy array, time series, pandas or dask dataframe
:param column: specify which column (string or number) of the dataframe to use,
else the first numerical is selected
:param display: maximum number of data points to display, forces sampling if smaller than len(df)
:param leaf_order: how many leaf digits per data point to display, defaults to 1
:param random_state: initial random seed for the sampling process, for reproducible research
:param scale: force a specific scale for building the plot. Defaults to None (automatic).
:param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
:param zoom: zoom level, on top of calculated scale (+1, -1 etc)
:return: decimated df
"""
x = df if column is None else df[column]
scale, pair, rows, sorted_data, stems = stem_data(x, column=column, display=display, full=True,
leaf_order=leaf_order,
random_state=random_state,
scale=scale, trim=trim, zoom=zoom)
values = [(stem + leaf) * scale for stem, leaf in sorted_data]
return values
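# Illustrative usage sketch (the input values below are assumed, not from the
# package's own docs):
#
#     values = quantize([1.2, 1.9, 2.3, 2.7, 3.1, 3.8], display=6)
#
# Each returned value is (stem + leaf) * scale, i.e. the original data point
# truncated to the leaf precision of the underlying stem-and-leaf plot, and the
# values come back sorted rather than in their original order.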
def stem_data(x, break_on=None, column=None, compact=False, display=300, full=False, leaf_order=1,
omin=None, omax=None, outliers=False, persistence=None, random_state=None, scale=None,
total_rows=None, trim=False, zoom=None):
""" Returns scale factor, key label and list of rows.
:param x: list, numpy array, time series, pandas or dask dataframe
:param break_on: force a break of the leaves at x in (5, 10), defaults to 10
:param column: specify which column (string or number) of the dataframe to use,
else the first numerical is selected
:param compact: do not display empty stem rows (with no leaves), defaults to False
:param display: maximum number of data points to display, forces sampling if smaller than len(df)
:param full: bool, if True returns all interim results including sorted data and stems
:param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param outliers: display outliers - these are from the full data set, not the sample. Defaults to False
:param omin: float, if already calculated, helps speed up the process for large data sets
:param omax: float, if already calculated, helps speed up the process for large data sets
:param persistence: persist sampled dataframe
:param random_state: initial random seed for the sampling process, for reproducible research
:param scale: force a specific scale for building the plot. Defaults to None (automatic)
:param total_rows: int, if already calculated, helps speed up the process for large data sets
:param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
:param zoom: zoom level, on top of calculated scale (+1, -1 etc)
"""
rows = []
# Multivariate or not
try:
cols = len(x.columns)
except AttributeError:
# wasn't a multi column data frame, might be a list
cols = 1
if cols > 1:
if column is None:
# We have to figure out the first numerical column on our own
start_at = 1 if x.columns[0] == 'id' else 0
for i in range(start_at, len(x.columns)):
if x.dtypes[i] in ('int64', 'float64'):
column = i
break
#if dd:
# x = x[x.columns.values[column]]
#else:
x = x.ix[:, column]
# Sampling or not we need the absolute min/max
if omin is None or omax is None or total_rows is None:
omin, omax, total_rows = min_max_count(x, column) # very expensive if on disk, don't do it twice
n = total_rows
if n == 0:
return None
elif n > display:
try:
x = x.sample(n=display, random_state=random_state).values
except TypeError:
# We are here due to dask not supporting n=. We'll use less precise frac instead
frac = display / n
x = x.sample(frac=frac, random_state=random_state).compute().values
if persistence is not None:
if persistence[-4:] == '.pkl':
                pd.DataFrame(x).to_pickle(persistence)
else:
                pd.DataFrame(x).to_csv(persistence)  # TODO: add feather, hdf5 etc
n = display
if n <= 300:
# Dixon
lines = math.floor(10 * math.log(n, 10))
else:
# Velleman
lines = math.floor(2 * math.sqrt(n))
try:
x = x[~np.isnan(x)]
xmin = x.min()
xmax = x.max()
except AttributeError:
xmin = min(x)
xmax = max(x)
try:
spread = xmax - xmin
except TypeError:
warn("Column data appears to be non numerical. Specify a numeric column.")
return None
# we will trim on the sample, or the whole data set
    lowest, highest = percentile(x, trim) if trim else (xmin, xmax)
# scale_factor = as small as possible but lines * S must be >= spread
if lines == 0:
lines = 1
r_value = spread / lines
if scale: # we were passed a scale, use it
scale_factor = scale
else: # The bulk of the logic to figure out the best scaling and visualization
try:
scale_factor = pow(10, math.ceil(math.log(r_value, 10)))
except ValueError:
scale_factor = 1
check = math.floor(xmax / scale_factor - xmin / scale_factor + 1)
if check > lines:
scale_factor *= 10
elif (check < 7 and n >= 45) or check < 3:
scale_factor /= 10 # 30 lines on avg, up to 60 some lines max by bumping the scale
elif math.floor(check) * 2 <= lines + 1 and break_on is None:
break_on = 5
if zoom == -1 and break_on == 5:
break_on = None
elif zoom == -1:
break_on = 5
scale_factor /= 10
elif zoom == 1 and break_on == 5:
scale_factor *= 10
elif zoom == 1:
break_on = 5
scale_factor *= 10
if break_on is None:
break_on = 10
truncate_factor = scale_factor / pow(10, leaf_order)
# Now that we have a scale, we are going to round to it, trim outliers and split stem and leaf
rounded_data = [int(np.round(item / truncate_factor)) * truncate_factor for item in x if lowest <= item <= highest]
data = []
for val in rounded_data:
frac_part, int_part = math.modf(val / scale_factor)
round_frac = round(frac_part, 2)
if round_frac == 1:
round_frac = 0.0
int_part += 1.0
data.append((round_frac, int_part))
sorted_data = sorted(data, key=itemgetter(1, 0))
stems = list(set([s for l, s in sorted_data]))
current_stem = None
current_leaf = None
previous_mod = 0
row = ''
sign_transition = False
if xmin < 0 < xmax:
sign_transition = True
if outliers:
row = '{}\n ¡'.format(omin)
for leaf, stem in sorted_data:
#leaf = round(f_leaf, 1 + leaf_order)
if stem == current_stem:
ileaf = round(leaf * 10)
if sign_transition and stem == 0 and abs(leaf) == leaf:
sign_transition = False
rows.append(row)
row = '{:>3} | '.format(int(stem))
elif current_stem is not None and ileaf >= break_on == 5 and previous_mod > (ileaf % break_on):
rows.append(row)
row = ' | '
elif leaf_order > 1:
row += ' '
previous_mod = (ileaf % break_on)
row += str(round(abs(leaf), 1 + leaf_order))[2:leaf_order + 2]
else:
if row != '':
rows.append(row)
if current_stem is not None and not compact:
if break_on == 5 and row[0:4] != ' ':
row = ' | '
rows.append(row)
for missing in range(int(current_stem) + 1, int(stem)):
if int(current_stem) < 0 and missing == 0:
neg_zero = '{:>3} |'.format("-0")
rows.append(neg_zero)
empty_row = '{:>3} |'.format(missing)
rows.append(empty_row)
if break_on == 5:
rows.append(' | ')
current_leaf = str(round(abs(leaf), 1 + leaf_order))[2:leaf_order + 2].zfill(leaf_order)
if current_stem and int(current_leaf) >= break_on:
row = '{:>3} | '.format(int(stem))
rows.append(row)
stem_ind = ' '
else:
stem_ind = int(stem)
row = '{:>3} | {}'.format("-0" if stem == 0 and abs(leaf) != leaf else stem_ind, current_leaf)
current_stem = stem
# Potentially catching a last row
rows.append(row)
if outliers:
rows.append(' !\n{}'.format(omax))
key_label = "{}|{}".format(int(current_stem), current_leaf)
if full:
return scale_factor, key_label, rows, sorted_data, stems
else:
return scale_factor, key_label, rows
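# Illustrative usage sketch (toy data assumed): ``scale_factor`` is the stem
# unit, ``key_label`` a sample "stem|leaf" pair for the legend, and ``rows``
# the text lines of the plot, e.g.
#
#     scale_factor, key_label, rows = stem_data([12, 13, 21, 27, 33, 35], display=6)
#     for row in rows:
#         print(row)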
def stem_dot(df, asc=True, break_on=None, column=None, compact=False, display=300, leaf_order=1, legend_pos='best',
marker=None, outliers=True, random_state=None, scale=None, trim=False, unit='', zoom=None):
"""
:param df: list, numpy array, time series, pandas or dask dataframe
:param asc: stem sorted in ascending order, defaults to True
:param break_on: force a break of the leaves at x in (5, 10), defaults to 10
:param column: specify which column (string or number) of the dataframe to use,
else the first numerical is selected
:param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
:param legend_pos: One of 'top', 'bottom', 'best' or None, defaults to 'best'.
:param marker: char, symbol to use as marker. 'O' is default. Suggested alternatives: '*', '+', 'x', '.', 'o'
:param outliers: display outliers - these are from the full data set, not the sample. Defaults to Auto
:param random_state: initial random seed for the sampling process, for reproducible research
:param scale: force a specific scale for building the plot. Defaults to None (automatic).
:param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
:param unit: specify a string for the unit ('$', 'Kg'...). Used for outliers and for legend, defaults to ''
:param zoom: zoom level, on top of calculated scale (+1, -1 etc)
"""
if marker is None:
marker = 'O' # commonly used, but * could also be used
x = df if column is None else df[column]
scale, pair, rows = stem_data(x, break_on=break_on, column=column, compact=compact,
display=display, leaf_order=leaf_order,
outliers=outliers, random_state=random_state,
scale=scale, trim=trim, zoom=zoom)
if legend_pos == 'top':
st, lf = pair.split('|')
print('Key: \n{} => {}.{}x{} = {} {}'.format(pair, st, lf, scale, key_calc(st, lf, scale), unit))
ordered_rows = rows if asc else rows[::-1]
for row in ordered_rows:
try:
st, lf = row.split('|')
print("{}|{}".format(st, 'O' * len(lf)))
except ValueError:
# no pipe in row, print as is
print(row)
if legend_pos is not None and legend_pos != 'top':
st, lf = pair.split('|')
print('Scale: \n{} => {}.{}x{} = {} {}'.format(pair, st, lf, scale, key_calc(st, lf, scale), unit))
def stem_text(df, asc=True, break_on=None, column=None, compact=False, display=300,
legend_pos='best', outliers=True, persistence=None,
random_state=None, scale=None, trim=False, unit='', zoom=None):
"""
:param df: list, numpy array, time series, pandas or dask dataframe
:param asc: stem sorted in ascending order, defaults to True
:param break_on: force a break of the leaves at x in (5, 10), defaults to 10
:param column: specify which column (string or number) of the dataframe to use,
else the first numerical is selected
:param compact: do not display empty stem rows (with no leaves), defaults to False
:param display: maximum number of data points to display, forces sampling if smaller than len(df)
:param legend_pos: One of 'top', 'bottom', 'best' or None, defaults to 'best'.
:param outliers: display outliers - these are from the full data set, not the sample. Defaults to Auto
:param persistence: filename. save sampled data to disk, either as pickle (.pkl) or csv (any other extension)
:param random_state: initial random seed for the sampling process, for reproducible research
:param scale: force a specific scale for building the plot. Defaults to None (automatic).
:param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
:param unit: specify a string for the unit ('$', 'Kg'...). Used for outliers and for legend, defaults to ''
:param zoom: zoom level, on top of calculated scale (+1, -1 etc)
"""
x = df if column is None else df[column]
scale, pair, rows = stem_data(x, break_on=break_on, column=column, compact=compact,
display=display, outliers=outliers, persistence=persistence,
random_state=random_state, scale=scale, trim=trim, zoom=zoom)
if legend_pos == 'top':
st, lf = pair.split('|')
print('Key: \n{} => {}.{}x{} = {} {}'.format(pair, st, lf, scale, key_calc(st, lf, scale), unit))
ordered_rows = rows if asc else rows[::-1]
for row in ordered_rows:
print(row)
if legend_pos is not None and legend_pos != 'top':
st, lf = pair.split('|')
print('Key: \n{} => {}.{}x{} = {} {}'.format(pair, st, lf, scale, key_calc(st, lf, scale), unit))
| mit |
lemiere/python-lecture | tp_aleatoire/correction/brownien.py | 1 | 2434 | #!/usr/bin/python3.5
# -*- coding: utf-8 -*-
# Lemiere Yves
# July 2017
import matplotlib.pyplot as plt
import random
import math
import numpy as np
def distance(x1,y1,x2,y2):
dx = (x2-x1)
dy = (y2-y1)
tmp_distance = math.sqrt(dx*dx+dy*dy)
return tmp_distance
def move(x,y,delta_x,delta_y):
next_x = 0
next_y = 0
next_x = x + delta_x
next_y = y + delta_y
return next_x,next_y
def choose_direction():
tmp_value = None
list_of_direction = ['W','S','E','N']
tmp_value = random.choice(list_of_direction)
return tmp_value
def prepare_mouvement(a_direction):
if a_direction == 'N':
delta_x = 0
delta_y = +1
elif a_direction == 'S':
delta_x = 0
delta_y = -1
elif a_direction == 'E':
delta_x = 1
delta_y = 0
elif a_direction == 'W':
delta_x = -1
delta_y = 0
else:
delta_x = 0
delta_y = 0
return delta_x,delta_y
def simulate(tmp_x, tmp_y,tmp_it):
current_x = tmp_x
current_y = tmp_y
a_x = []
a_y = []
for it in range(tmp_it):
the_direction = choose_direction()
dx,dy = prepare_mouvement(the_direction)
current_x,current_y = move(current_x,current_y,dx,dy)
a_x.append(current_x)
a_y.append(current_y)
return a_x,a_y
if __name__ == "__main__":
debug = False
if debug:
print("************************")
print("* Welcome in brownien *")
print("************************\n")
random.seed(None)
x_init = 0
y_init = 0
current_x = x_init
current_y = y_init
nb_of_iteration = 2000
final_distance = []
nb_simulation = 1000
for it in range(nb_simulation):
x = []
y = []
x,y = simulate(x_init,y_init,nb_of_iteration)
final_distance.append(distance(x_init,y_init,x[-1],y[-1]))
if debug :
print("Distance from initial position is : {}".format(final_distance[-1]))
plt.grid()
plt.scatter(x,y,c= range(len(x)), marker = '.', s=200,zorder=1)
plt.show()
print("mean distance for {} iterations after {} simulations : {}".format(nb_of_iteration,nb_simulation,np.mean(final_distance)))
plt.hist(final_distance)
plt.show()
| gpl-3.0 |
toobaz/pandas | pandas/tests/indexes/test_frozen.py | 2 | 3768 | import warnings
import numpy as np
import pytest
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
from pandas.util import testing as tm
class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ("extend", "pop", "remove", "insert")
unicode_container = FrozenList(["\u05d0", "\u05d1", "c"])
def setup_method(self, _):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_iadd(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# Other shouldn't be mutated.
self.check_result(r, self.lst)
def test_union(self):
result = self.container.union((1, 2, 3))
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
def test_difference(self):
result = self.container.difference([2])
expected = FrozenList([1, 3, 4, 5])
self.check_result(result, expected)
def test_difference_dupe(self):
result = FrozenList([1, 2, 3, 2]).difference([2])
expected = FrozenList([1, 3])
self.check_result(result, expected)
def test_tricky_container_to_bytes_raises(self):
# GH 26447
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(self.unicode_container)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ("put", "itemset", "fill")
def setup_method(self, _):
self.lst = [3, 5, 7, -2]
self.klass = FrozenNDArray
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
self.container = FrozenNDArray(self.lst)
self.unicode_container = FrozenNDArray(["\u05d0", "\u05d1", "c"])
def test_constructor_warns(self):
# see gh-9031
with tm.assert_produces_warning(FutureWarning):
FrozenNDArray([1, 2, 3])
def test_tricky_container_to_bytes(self):
bytes(self.unicode_container)
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
assert not isinstance(self.container.view(np.ndarray), FrozenNDArray)
assert self.container.view() is not self.container
tm.assert_numpy_array_equal(self.container, original)
# Shallow copy should be the same too
assert isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
tm.assert_numpy_array_equal(original, vals)
assert original is not vals
vals[0] = n
assert isinstance(self.container, FrozenNDArray)
tm.assert_numpy_array_equal(self.container.values(), original)
assert vals[0] == n
def test_searchsorted(self):
expected = 2
assert self.container.searchsorted(7) == expected
with tm.assert_produces_warning(FutureWarning):
assert self.container.searchsorted(v=7) == expected
| bsd-3-clause |
CCIce/Interesting-Project | 简单单元有限元计算/任意三角形单元划分.py | 1 | 7160 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from sympy import *
import copy
import matplotlib.pyplot as plt
# from Tkinter import *
import argparse
h, u = 1, 0
E = Symbol('E')
# Column matrix of nodal loads
Rx1, Rx2, Rx4,Ry4,Ry5,Ry6 = symbols('Rx1, Rx2, Rx4,Ry4,Ry5,Ry6')
# Column matrix of nodal displacements
v1, v2, u3, v3, u5, u6 = symbols('v1 v2 u3 v3 u5 u6')
s = []
def JiangWei(KK, n, Q):
    # l: row/column indices to delete from the matrix (the DOFs whose displacement is zero)
    # n is the number of divisions
l = []
for i in xrange(n + 1):
node = TTy(i)
if i == n:
l.append(2 * (node - 1))
for j in xrange(node, node + n + 1):
l.append(2 * j - 1)
else:
l.append(2 * (node - 1))
# print l
# print len(l)
    # Reduce the matrix (remove the constrained rows and columns)
p = 0
for i in l:
KK.col_del(i - p)
KK.row_del(i - p)
p += 1
# pprint (KK/E)
# print KK.shape
for i in xrange(KK.shape[0]-1):
Q = Q.row_insert(1, Matrix([[0]]))
    # Solve the equations for the displacements
    # d is the solution of the system
# pprint (KK)
KK = KK.col_insert(KK.shape[1], Q)
d = KK.rref()[0].col(-1)
    # Full displacement column matrix including the prescribed (zero) values
for i in l:
d = d.row_insert(i,Matrix([[0]]))
# print d
return d
class Plane2D(object):
def __init__(self,xl,xm,xn,yl,ym,yn,l,m,n,KK):
self.l = l
self.m = m
self.n = n
self.xl = xl
self.xm = xm
self.xn = xn
self.yl = yl
self.ym = ym
self.yn = yn
self.KK = KK
"""坐标的矩阵"""
self.A = Matrix([[1,xl,yl], [1, xm, ym], [1, xn,yn]])
self.Area = 0.5 * self.A.det()
"""A的代数余子式"""
Al = []
for i in xrange(0, 3):
for j in xrange(1, 3):
Al.append(self.calCofactor(self.A, i, j))
self.B = 1 / (2 * self.Area) * Matrix([[Al[0], 0, Al[2], 0, Al[4], 0], [0, Al[1], 0, Al[3], 0, Al[5]], \
[Al[1], Al[0], Al[3], Al[2], Al[5], Al[4]]])
self.D = (E/(1-u**2)) * Matrix([[1, u, 0], [u, 1, 0], [0, 0, (1-u)*0.5]])
"""计算代数余子式"""
def calCofactor(self,A,i,j):
b = copy.deepcopy(A)
b.row_del(i)
b.col_del(j)
if (i+j)%2 == 0:
return b.det()
else:
return -b.det()
"""计算总刚"""
def krs(self):
Area = self.Area
A = self.A
KK = self.KK
B = self.B
Bl = B[ : , 0 : 2]
Bm = B[ : , 2 : 4]
Bn = B[ : , 4 : 6]
BList = {self.l:Bl, self.m:Bm, self.n:Bn}
for r in BList:
for s in BList:
krs = BList[r].T * self.D * BList[s] * h * Area
for i in xrange(2):
for j in xrange(2):
KK[2 * r + i - 2, 2 * s + j - 2] += krs[i, j]
return KK
def Stress(self,d):
l, m, n = self.l, self.m, self.n
A = Matrix([[1, self.xl, self.yl], [1, self.xm, self.ym], [1, self.xn, self.yn]])
B = self.B
D = (E / (1 - u ** 2)) * Matrix([[1, u, 0], [u, 1, 0], [0, 0, (1 - u) * 0.5]])
        # k: element nodal displacement vector
k = Matrix([0])
for i in [l,m,n]:
k = k.row_insert(-1,d.row(2*i-2))
k = k.row_insert(-1, d.row(2 * i - 1))
k.row_del(-1)
# pprint(k)
pprint (D*B*k)
# t.insert(END,'sj')
# return k
# def printStress(self, k):
# pprint (self.D*self.B*k)
def TTy(n, b=1, c=0):
if n == c:
return b+n
else:
return TTy(n, b=b+c, c=c+1)
def TTx(n, b=1, c=0):
if n == c:
return b+n
else:
return TTx(n, b=b+c+1, c=c+1)
#def TTx(n):
# if n == 0:
# return 1
# else:
# return n+1+TTx(n-1)
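# Closed forms (illustrative note): TTy(n) == n*(n+1)/2 + 1 is the node number of
# the first node in mesh row n, and TTx(n) == (n+1)*(n+2)/2 is the total number of
# nodes for n divisions, e.g. TTy(2) == 4 and TTx(2) == 6.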
def dividedUnit(n):
"""
    num is the node number of a point
    n is the number of element divisions
    d is the side length of one small element
"""
d = 2.0/n
y, x = 2.0, 0.0
xylist = [0]
#画点、点的注释
for i in xrange(n+1):
y = 2 - i*d
x = 0.0
for j in xrange(i+1):
x = j * d
xylist.append((x, y))
# print len(xylist)
# print xylist
    # divide into elements
unitlist = []
jdhlist = []
for i in xrange(1, n + 1):
xStart = TTy(i)
xEnd = TTy(i)+i
sStart = TTy(i - 1)
flag = 1
while not xStart == xEnd:
if flag % 2 == 0:
unitlist.append([xylist[sStart], xylist[xStart], xylist[sStart + 1]])
jdhlist.append([sStart, xStart, sStart + 1])
sStart += 1
flag += 1
else:
unitlist.append([xylist[sStart], xylist[xStart], xylist[xStart + 1]])
jdhlist.append([sStart, xStart, xStart + 1])
xStart += 1
flag += 1
return unitlist, jdhlist
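# Illustrative example: with a single division the mesh is one triangle, so
# dividedUnit(1) returns ([[(0.0, 2.0), (0.0, 0.0), (2.0, 0.0)]], [[1, 2, 3]]),
# i.e. one element spanning nodes 1, 2 and 3.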
def drawingPlot(n, xy):
num = 1
d = 2.0 / n
# print 'd=%.2f' %d
y, x = 2.0, 0.0
    # draw the points and their labels
for i in xrange(n + 1):
y = 2 - i * d
x = 0.0
for j in xrange(i + 1):
x = j * d
plt.plot(x, y, 'ro')
plt.text(x, y, num, color='blue', fontsize=10)
num+=1
for i in xrange(0,n):
plt.plot([0,2-i*d], [2-i*d,0],'r')
plt.plot([i*d,i*d],[0,2-(i)*d],'r')
plt.plot([0,d*(i+1)],[2-(i+1)*d,2-(i+1)*d],'r')
# print len(xy)
num=1
for i in xrange(len(xy)):
x = (xy[i][0][0]+xy[i][1][0]+xy[i][2][0])/3.0
y = (xy[i][0][1]+xy[i][1][1]+xy[i][2][1])/3.0
plt.text(x,y,num,color='green', fontsize=15)
num+=1
plt.axis([0, 2, 0, 2])
plt.grid(color='b', linewidth=0.5, linestyle='--')
plt.show()
def printStress(xy, jdh,d):
for i in xrange(len(jdh)):
Plane2D(xy[i][0][0],xy[i][1][0],xy[i][2][0],xy[i][0][1],xy[i][1][1],xy[i][2][1],\
jdh[i][0],jdh[i][1],jdh[i][2],KK).Stress(d)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", action="store", help="Divided unit", dest='n', type=int )
parser.add_argument("-k",action="store_false", help="zong gang")
parser.add_argument("-s", action="store_false", help="printing stress")
parser.add_argument("-d", action="store_false", help="drawing picture")
args = parser.parse_args()
node = 2*(TTy(args.n)+args.n)
KK = zeros(node, node)
xy, jdh = dividedUnit(args.n)
Q = Matrix([-10])
# xl,xm,xn,yl,ym,yn,l,m,n
for i in xrange(len(jdh)):
KK = Plane2D(xy[i][0][0],xy[i][1][0],xy[i][2][0],xy[i][0][1],xy[i][1][1],xy[i][2][1],\
jdh[i][0],jdh[i][1],jdh[i][2],KK).krs()
if not args.k:
# print "------------总刚------------"
pprint (KK)
d = JiangWei(KK, args.n, Q)
if not args.s:
# print "------------应力------------"
printStress(xy,jdh,d)
if not args.d:
# d = JiangWei(KK, args.n, Q)
drawingPlot(args.n,xy)
# pprint (KK*1/E)
# pprint (d*E)
# print '----------------------------------------'
# drawingPlot(n, xy)
| mit |
f3r/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
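    Examples
    --------
    A minimal usage sketch (only the shapes are checked; the generated values
    depend on ``random_state``):
    >>> from sklearn.datasets.samples_generator import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
    >>> X.shape, y.shape
    ((100, 20), (100,))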
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
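    Examples
    --------
    A minimal usage sketch; with the default dense indicator output, ``Y`` has
    one binary column per class:
    >>> from sklearn.datasets.samples_generator import make_multilabel_classification
    >>> X, Y = make_multilabel_classification(n_samples=5, n_classes=3, random_state=0)
    >>> X.shape, Y.shape
    ((5, 20), (5, 3))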
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
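    Examples
    --------
    A minimal usage sketch; the threshold 9.34 is approximately the median of a
    chi-squared variable with 10 degrees of freedom, so the two classes come out
    roughly balanced:
    >>> from sklearn.datasets.samples_generator import make_hastie_10_2
    >>> X, y = make_hastie_10_2(n_samples=100, random_state=1)
    >>> X.shape, y.shape
    ((100, 10), (100,))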
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
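    Examples
    --------
    A minimal usage sketch; with ``coef=True`` the underlying coefficients are
    returned as well:
    >>> from sklearn.datasets.samples_generator import make_regression
    >>> X, y, coef = make_regression(n_samples=10, n_features=5, n_informative=2, coef=True, random_state=0)
    >>> X.shape, y.shape, coef.shape
    ((10, 5), (10,), (5,))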
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
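    Examples
    --------
    A minimal usage sketch; ``factor`` sets the radius ratio of the inner circle
    to the outer one:
    >>> from sklearn.datasets.samples_generator import make_circles
    >>> X, y = make_circles(n_samples=10, noise=0.05, factor=0.5, random_state=0)
    >>> X.shape, y.shape
    ((10, 2), (10,))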
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
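    Examples
    --------
    A minimal usage sketch; each half circle gets its own label:
    >>> from sklearn.datasets.samples_generator import make_moons
    >>> X, y = make_moons(n_samples=10, noise=0.1, random_state=0)
    >>> X.shape, y.shape
    ((10, 2), (10,))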
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
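    Examples
    --------
    A minimal usage sketch; only the first five features enter the target
    formula, any extra features are independent of ``y``:
    >>> from sklearn.datasets.samples_generator import make_friedman1
    >>> X, y = make_friedman1(n_samples=20, n_features=5, random_state=0)
    >>> X.shape, y.shape
    ((20, 5), (20,))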
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
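# Illustrative usage sketch (hypothetical helper, added for clarity): decode
# the generator's output and verify the sparsity constraint on the code X.
def _example_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    # Each column of X carries exactly 3 non-zero coefficients, and Y = D X.
    assert (X != 0).sum(axis=0).max() == 3
    assert np.allclose(Y, np.dot(D, X))
    return Y, D, X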
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
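# Illustrative usage sketch (hypothetical helper, added for clarity): only the
# first four features are informative, which shows up directly in their
# absolute correlation with the target.
def _example_sparse_uncorrelated():
    X, y = make_sparse_uncorrelated(n_samples=1000, n_features=10,
                                    random_state=0)
    return np.array([abs(np.corrcoef(X[:, j], y)[0, 1])
                     for j in range(X.shape[1])])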
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
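# Illustrative usage sketch (hypothetical helper, added for clarity): check
# the two defining properties of the output -- symmetry and strictly positive
# eigenvalues.
def _example_spd_matrix():
    M = make_spd_matrix(n_dim=4, random_state=0)
    return np.allclose(M, M.T) and np.linalg.eigvalsh(M).min() > 0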
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
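# Illustrative usage sketch (hypothetical helper, added for clarity): because
# the zeros are imposed on the Cholesky factor, the fraction of zeros in the
# returned precision matrix only loosely tracks ``alpha``.
def _example_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=20, alpha=0.98, norm_diag=True,
                                  random_state=0)
    return prec, np.mean(prec == 0)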
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
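# Illustrative usage sketch (hypothetical helper, added for clarity): ``t`` is
# the intrinsic 1-D coordinate that manifold-learning methods try to recover
# from the 3-D points; here it spans roughly [1.5 * pi, 4.5 * pi].
def _example_swiss_roll():
    X, t = make_swiss_roll(n_samples=300, noise=0.05, random_state=0)
    return X.shape, t.min(), t.max()   # (300, 3), ~1.5 * pi, ~4.5 * pi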
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
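# Illustrative usage sketch (hypothetical helper, added for clarity): the S
# curve is built analogously to the swiss roll, with ``t`` spanning roughly
# [-1.5 * pi, 1.5 * pi] along the "S".
def _example_s_curve():
    X, t = make_s_curve(n_samples=300, noise=0.0, random_state=0)
    return X.shape, t.min(), t.max()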
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
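# Illustrative usage sketch (hypothetical helper, added for clarity): the
# quantile construction yields (nearly) equal-sized classes.
def _example_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    return np.bincount(y)   # three classes of 100 samples each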
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
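# Illustrative usage sketch (hypothetical helper, added for clarity): recover
# the shuffled block-diagonal structure from the returned row/column
# indicator arrays.
def _example_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.5, shuffle=True,
                                       random_state=0)
    row_order = np.argsort(np.argmax(rows, axis=0))
    col_order = np.argsort(np.argmax(cols, axis=0))
    return data[row_order][:, col_order]   # blocks become visible again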
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
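# Illustrative usage sketch (hypothetical helper, added for clarity): with
# (2, 3) clusters the output contains 2 * 3 = 6 biclusters, one per
# row-cluster/column-cluster pair, hence six indicator vectors in ``rows``
# and ``cols``.
def _example_checkerboard():
    data, rows, cols = make_checkerboard(shape=(30, 30), n_clusters=(2, 3),
                                         noise=1.0, shuffle=False,
                                         random_state=0)
    return data.shape, rows.shape, cols.shape   # (30, 30), (6, 30), (6, 30)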
| bsd-3-clause |
willgrass/pandas | pandas/core/series.py | 1 | 33974 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable-msg=E1101,E1103
# pylint: disable-msg=W0703,W0622
import itertools
import sys
from numpy import NaN, ndarray
import numpy as np
from pandas.core.common import isnull, notnull
from pandas.core.daterange import DateRange
from pandas.core.index import Index, NULL_INDEX
from pandas.core.mixins import Picklable, Groupable
import pandas.core.datetools as datetools
import pandas.lib.tseries as tseries
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _seriesOpWrap(opname):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
MIRROR_OPS = {
'__add__' : '__radd__',
'__sub__' : '__rsub__',
'__div__' : '__rdiv__',
'__mul__' : '__rmul__',
}
def wrapper(self, other):
from pandas.core.frame import DataFrame
func = getattr(self.values, opname)
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(func(other.values), index=self.index)
newIndex = self.index + other.index
try:
if self.dtype != np.float_:
this = self.astype(float)
else:
this = self
if other.dtype != np.float_:
other = other.astype(float)
# buffered Cython function expects double type
arr = tseries.combineFunc(opname, newIndex,
this, other,
self.index.indexMap,
other.index.indexMap)
except Exception:
arr = Series.combineFunc(self, other,
getattr(type(self[0]), opname))
result = Series(arr, index=newIndex)
return result
elif isinstance(other, DataFrame):
reverse_op = MIRROR_OPS.get(opname)
if reverse_op is None:
raise Exception('Cannot do %s op, sorry!' % opname)
return getattr(other, reverse_op)(self)
else:
return Series(func(other), index=self.index)
return wrapper
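# Illustrative sketch (hypothetical helper, added for clarity): the operators
# generated by _seriesOpWrap align on the union of the two indices, so labels
# present in only one operand come back as NaN. ``Series`` is defined below,
# therefore this helper must only be called after the module is fully loaded.
def _example_series_alignment():
    a = Series([1., 2., 3.], index=['a', 'b', 'c'])
    b = Series([10., 20., 30.], index=['b', 'c', 'd'])
    combined = a + b
    # combined.index == ['a', 'b', 'c', 'd']; 'a' and 'd' are NaN because
    # each is missing from one of the operands.
    return combined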
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, Picklable, Groupable):
"""
Generic indexed (labeled) vector (time series or cross-section)
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like or dict
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
"""
def __new__(cls, data, index=None, dtype=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Make a copy of the data, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
raise Exception('Index cannot be None!')
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
if subarr.index._allDates:
subarr = subarr.view(TimeSeries)
return subarr
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
if not isinstance(index, Index):
index = Index(index)
self._index = index
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
@classmethod
def fromValue(cls, value=np.NaN, index=None, dtype=None):
"""
Create Series with all values being the input scalar
Parameters
----------
value : scalar
    Value to assign to every entry of the new Series
index : array-like
    Index of the resulting Series
dtype : numpy dtype, optional
    dtype to force on the values; inferred from ``value`` when None
Returns
-------
y : Series
"""
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring):
dtype = np.object_
if dtype is None:
arr = np.empty(len(index), dtype=type(value))
else:
arr = np.empty(len(index), dtype=dtype)
arr.fill(value)
return Series(arr, index=index)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
values = self.values
try:
# Check that we can even look for this in the index
return values[self.index.indexMap[key]]
except KeyError:
if isinstance(key, int):
return values[key]
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
dataSlice = values[key]
indices = Index(self.index.view(ndarray)[key])
return Series(dataSlice, index=indices)
def get(self, key, default=None):
"""
Return the value for the requested index, or the specified default
value if the key is not present
Parameters
----------
key : object
Index value to look up
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return ndarray.__getitem__(self, self.index.indexMap[key])
else:
return default
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed if the slice were altered (made mutable).
"""
newArr = self.values[i:j].copy()
newIndex = self.index[i:j]
return Series(newArr, index=newIndex)
def __setitem__(self, key, value):
"""
If this series is mutable, set specified indices equal to given values.
"""
try:
loc = self.index.indexMap[key]
ndarray.__setitem__(self, loc, value)
except Exception:
values = self.values
values[key] = value
def __setslice__(self, i, j, value):
"""Set slice equal to given value(s)"""
ndarray.__setslice__(self, i, j, value)
def __repr__(self):
"""Clean string representation of a Series"""
vals = self.values
index = self.index
if len(index) > 500:
head = _seriesRepr(index[:50], vals[:50])
tail = _seriesRepr(index[-50:], vals[-50:])
return head + '\n...\n' + tail + '\nlength: %d' % len(vals)
elif len(index) > 0:
return _seriesRepr(index, vals)
else:
return '%s' % ndarray.__repr__(self)
def toString(self, buffer=sys.stdout, nanRep='NaN'):
print >> buffer, _seriesRepr(self.index, self.values,
nanRep=nanRep)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values)
def copy(self):
return Series(self.values.copy(), index=self.index)
#-------------------------------------------------------------------------------
# Arithmetic operators
__add__ = _seriesOpWrap('__add__')
__sub__ = _seriesOpWrap('__sub__')
__mul__ = _seriesOpWrap('__mul__')
__div__ = _seriesOpWrap('__div__')
__pow__ = _seriesOpWrap('__pow__')
# Inplace operators
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__idiv__ = __div__
__ipow__ = __pow__
#-------------------------------------------------------------------------------
# Statistics, overridden ndarray methods
def count(self):
"""
Return number of observations of Series.
Returns
-------
nobs : int
"""
return notnull(self.values).sum()
def sum(self, axis=None, dtype=None, out=None):
"""
Compute sum of non-null values
"""
return self._ndarray_statistic('sum')
def mean(self, axis=None, dtype=None, out=None):
"""
Compute mean of non-null values
"""
return self._ndarray_statistic('mean')
def _ndarray_statistic(self, funcname):
arr = self.values
retVal = getattr(arr, funcname)()
if isnull(retVal):
arr = remove_na(arr)
retVal = getattr(arr, funcname)()
return retVal
def min(self, axis=None, out=None):
"""
Compute minimum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = np.inf
return arr.min()
def max(self, axis=None, out=None):
"""
Compute maximum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = -np.inf
return arr.max()
def std(self, axis=None, dtype=None, out=None, ddof=1):
"""
Compute unbiased standard deviation of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return NaN
return ndarray.std(nona, axis, dtype, out, ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1):
"""
Compute unbiased variance of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return NaN
return ndarray.var(nona, axis, dtype, out, ddof)
def skew(self):
"""
Computes the skewness of the non-null values
Returns
-------
skew : float
"""
y = np.array(self.values)
mask = notnull(y)
count = mask.sum()
np.putmask(y, -mask, 0)
A = y.sum() / count
B = (y**2).sum() / count - A**2
C = (y**3).sum() / count - A**3 - 3*A*B
return (np.sqrt((count**2-count))*C) / ((count-2)*np.sqrt(B)**3)
def cumsum(self, axis=0, dtype=None, out=None):
"""
Overriding numpy's built-in cumsum functionality
"""
arr = self.copy()
okLocs = notnull(arr)
result = np.cumsum(arr.view(ndarray)[okLocs])
arr = arr.astype(result.dtype)
arr[okLocs] = result
return arr
def cumprod(self, axis=0, dtype=None, out=None):
"""
Overriding numpy's built-in cumprod functionality
"""
arr = self.copy()
okLocs = notnull(arr)
arr[okLocs] = np.cumprod(arr.view(ndarray)[okLocs])
return arr
def median(self):
"""
Compute median value of non-null values
"""
arr = self.values
arr = arr[notnull(arr)]
return tseries.median(arr)
def corr(self, other):
"""
Compute correlation between two Series, excluding missing values
Parameters
----------
other : Series object
Returns
-------
correlation : float
"""
commonIdx = remove_na(self).index.intersection(remove_na(other).index)
if len(commonIdx) == 0:
return NaN
this = self.reindex(commonIdx)
that = other.reindex(commonIdx)
return np.corrcoef(this, that)[0, 1]
def diff(self):
"""
1st discrete difference of object
Returns
-------
TimeSeries
"""
return (self - self.shift(1))
def autocorr(self):
"""
Lag-1 autocorrelation
Returns
-------
TimeSeries
"""
return self.corr(self.shift(1))
def clip(self, upper=None, lower=None):
"""
Trim values at input threshold(s)
Parameters
----------
lower : float, default None
upper : float, default None
Returns
-------
y : Series
"""
result = self
if lower is not None:
result = result.clip_lower(lower)
if upper is not None:
result = result.clip_upper(upper)
return result
def clip_upper(self, threshold):
"""Return copy of series with values above given value truncated"""
return np.where(self > threshold, threshold, self)
def clip_lower(self, threshold):
"""Return copy of series with values below given value truncated"""
return np.where(self < threshold, threshold, self)
#-------------------------------------------------------------------------------
# Iteration
def keys(self):
"Alias for Series index"
return self.index
@property
def values(self):
"""
Return Series as ndarray
Returns
-------
arr : numpy.ndarray
"""
return self.view(ndarray)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return itertools.izip(iter(self.index), iter(self))
#-------------------------------------------------------------------------------
# Combination
def append(self, other):
"""
Concatenate two Series. The indices should not overlap
Parameters
----------
other : Series
Returns
-------
y : Series
"""
newIndex = np.concatenate((self.index, other.index))
# Force overlap check
try:
newIndex = Index(newIndex)
except Exception:
raise
newValues = np.concatenate((self, other))
return Series(newValues, index=newIndex)
def combineFunc(self, other, func):
"""
Combines this Series using the given function with either
* another Series index by index
* a scalar value
* DataFrame
Parameters
----------
other : {Series, DataFrame, scalar value}
Returns
-------
y : {Series or DataFrame}
Output depends on input. If a DataFrame is inputted, that
will be the return type.
"""
if isinstance(other, Series):
newIndex = self.index + other.index
newArr = np.empty(len(newIndex), dtype=self.dtype)
for i, idx in enumerate(newIndex):
newArr[i] = func(self.get(idx, NaN), other.get(idx, NaN))
else:
newIndex = self.index
newArr = func(self.values, other)
return Series(newArr, index=newIndex)
def combineFirst(self, other):
"""
Combine Series values, choosing calling Series's values first.
Parameters
----------
other : Series
Returns
-------
y : Series
formed as union of two Series
"""
if self.index.equals(other.index):
newIndex = self.index
# save ourselves the copying in this case
this = self
else:
newIndex = self.index + other.index
this = self.reindex(newIndex)
other = other.reindex(newIndex)
result = Series(np.where(isnull(this), other, this), index=newIndex)
return result
#-------------------------------------------------------------------------------
# Reindexing, sorting
def sort(self, axis=0, kind='quicksort', order=None):
"""
Overridden NumPy sort, taking care with missing values
"""
sortedSeries = self.order(missingAtEnd=True)
self[:] = sortedSeries
self.index = sortedSeries.index
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overriding numpy's built-in argsort, taking care with missing values
"""
values = self.values
mask = isnull(values)
if mask.any():
result = values.copy()
notmask = -mask
result[notmask] = np.argsort(values[notmask])
return Series(result, index=self.index)
else:
return Series(np.argsort(values), index=self.index)
def order(self, missingAtEnd=True):
"""
Sorts Series object, by value, maintaining index-value object
Parameters
----------
missingAtEnd : boolean (optional, default=True)
Put NaN's at beginning or end
In general, AVOID sorting Series unless you absolutely need to.
Returns
-------
y : Series
sorted by values
"""
def _try_mergesort(arr):
# easier to ask forgiveness than permission
try:
return arr.argsort(kind='mergesort')
except TypeError:
# stable sort not available for object dtype
return arr.argsort()
arr = self.values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isnull(arr)
good = -bad
idx = np.arange(len(self))
if missingAtEnd:
n = sum(good)
sortedIdx[:n] = idx[good][_try_mergesort(arr[good])]
sortedIdx[n:] = idx[bad]
else:
n = sum(bad)
sortedIdx[n:] = idx[good][_try_mergesort(arr[good])]
sortedIdx[:n] = idx[bad]
return Series(arr[sortedIdx], index=self.index[sortedIdx])
def map(self, arg):
"""
Map values of Series using input correspondence (which can be
a dict, Series, or function).
Parameters
----------
arg : function, dict, or Series
Returns
-------
y : Series
same index as caller
"""
if isinstance(arg, (dict, Series)):
if isinstance(arg, dict):
arg = Series(arg)
indexer, mask = tseries.getMergeVec(self, arg.index.indexMap)
newValues = arg.view(np.ndarray).take(indexer)
np.putmask(newValues, -mask, np.nan)
newSer = Series(newValues, index=self.index)
return newSer
else:
return Series([arg(x) for x in self], index=self.index)
merge = map
def reindex(self, newIndex, fillMethod=None):
"""Overloaded version of reindex for TimeSeries. Supports filling
with values based on new index.
See analogous method for DataFrame, will be faster for multiple
TimeSeries
Parameters
----------
newIndex : array-like, preferably an Index object (to avoid
duplicating data)
fillMethod : {'backfill', 'pad', 'interpolate', None}
Method to use for filling holes in reindexed Series
Returns
-------
TimeSeries
"""
if self.index.equals(newIndex):
return self.copy()
if not isinstance(newIndex, Index):
newIndex = Index(newIndex)
if len(self.index) == 0:
return Series.fromValue(NaN, index=newIndex)
if fillMethod is not None:
fillMethod = fillMethod.upper()
# Cython for blazing speed
fillVec, mask = tseries.getFillVec(self.index, newIndex,
self.index.indexMap,
newIndex.indexMap,
kind=fillMethod)
newValues = self.values.take(fillVec)
notmask = -mask
if notmask.any():
if issubclass(newValues.dtype.type, np.int_):
newValues = newValues.astype(float)
elif issubclass(newValues.dtype.type, np.bool_):
newValues = newValues.astype(object)
np.putmask(newValues, notmask, NaN)
return Series(newValues, index=newIndex)
def fill(self, value=None, method='pad'):
"""
Fill NaN values using the specified method.
Parameters
----------
value : any kind (should be same type as array)
Value to use to fill holes (e.g. 0)
method : {'backfill', 'pad', None}
Method to use for filling holes in the new index
Returns
-------
TimeSeries with NaN's filled
See also
--------
reindex, asfreq
"""
if value is not None:
newSeries = self.copy()
newSeries[isnull(newSeries)] = value
return newSeries
else: # Using reindex to pad / backfill
withoutna = remove_na(self)
return withoutna.reindex(self.index, fillMethod=method)
#-------------------------------------------------------------------------------
# Miscellaneous
def plot(self, label=None, kind='line', rot=30, **kwds): # pragma: no cover
"""
Plot the input series with the index on the x-axis using
matplotlib / pylab.
Parameters
----------
label : label argument to provide to plot
kind : {'line', 'bar'}
    Plot type; defaults to 'line'
kwds : other plotting keyword arguments
Notes
-----
See matplotlib documentation online for more on this subject
Supported plot-types: line, bar
Intended to be used in ipython -pylab mode
"""
import matplotlib.pyplot as plt
if label is not None:
kwds = kwds.copy()
kwds['label'] = label
N = len(self)
if kind == 'line':
plt.plot(self.index, self.values, **kwds)
elif kind == 'bar':
xinds = np.arange(N) + 0.25
plt.bar(xinds, self.values, 0.5, bottom=np.zeros(N), linewidth=1)
if N < 10:
fontsize = 12
else:
fontsize = 10
plt.xticks(xinds + 0.25, self.index, rotation=rot,
fontsize=fontsize)
def toCSV(self, path):
"""
Write the Series to a CSV file
Parameters
----------
path : string
    Output filepath
"""
f = open(path, 'wb')
for idx, value in self.iteritems():
f.write(str(idx) + ',' + str(value) + ',\n')
f.close()
def valid(self):
"""
Return Series without NaN values
Returns
-------
Series
"""
return remove_na(self)
def _firstTimeWithValue(self):
noNA = remove_na(self)
if len(noNA) > 0:
return noNA.index[0]
else:
return None
def _lastTimeWithValue(self):
noNA = remove_na(self)
if len(noNA) > 0:
return noNA.index[-1]
else:
return None
#-------------------------------------------------------------------------------
# Time series-oriented methods
def shift(self, periods, offset=None, timeRule=None):
"""
Shift the values of the Series by the given number of periods
(positive or negative), optionally using a date offset or time rule.
Parameters
----------
periods : int (+ or -)
Number of periods to move
offset : DateOffset, optional
Increment to use from datetools module
timeRule : string
time rule name to use by name (e.g. 'WEEKDAY')
Returns
-------
TimeSeries
"""
if periods == 0:
return self.copy()
if timeRule is not None and offset is None:
offset = datetools.getOffset(timeRule)
if offset is None:
newValues = np.empty(len(self), dtype=self.dtype)
if periods > 0:
newValues[periods:] = self.values[:-periods]
newValues[:periods] = np.NaN
elif periods < 0:
newValues[:periods] = self.values[-periods:]
newValues[periods:] = np.NaN
return Series(newValues, index=self.index)
else:
newIndex = self.index.shift(periods, offset)
return Series(self, index=newIndex)
def truncate(self, before=None, after=None):
"""Function truncate a sorted TimeSeries before and/or after
some particular dates.
Parameters
----------
before : date
Truncate before date
after : date
Truncate after date
Notes
-----
If TimeSeries is contained in a DataFrame, consider using the version
of the function there.
Returns
-------
TimeSeries
"""
before = datetools.to_datetime(before)
after = datetools.to_datetime(after)
if before is None:
beg_slice = 0
elif before in self.index:
beg_slice = self.index.indexMap[before]
elif before < self.index[-1]:
beg_slice = self.index.searchsorted(before, side='left')
else:
return Series([], index=NULL_INDEX)
if after is None:
end_slice = len(self)
elif after in self.index:
end_slice = self.index.indexMap[after] + 1
elif after > self.index[0]:
end_slice = self.index.searchsorted(after, side='right')
else:
return Series([], index=NULL_INDEX)
return self[beg_slice:end_slice]
def asOf(self, date):
"""
Return last good (non-NaN) value in TimeSeries if value is NaN for
requested date.
If there is no good value, NaN is returned.
Parameters
----------
date : datetime or similar value
Notes
-----
Dates are assumed to be sorted
Returns
-------
value or NaN
"""
if isinstance(date, basestring):
date = datetools.to_datetime(date)
v = self.get(date)
if isnull(v):
candidates = self.index[notnull(self)]
index = candidates.searchsorted(date)
if index > 0:
asOfDate = candidates[index - 1]
else:
return NaN
return self.get(asOfDate)
else:
return v
def asfreq(self, freq, fillMethod=None):
"""
Convert this TimeSeries to the provided frequency using DateOffset
objects. Optionally provide fill method to pad/backfill/interpolate
missing values.
Parameters
----------
freq : DateOffset object, or string in {'WEEKDAY', 'EOM'}
DateOffset object or subclass (e.g. monthEnd)
fillMethod : {'backfill', 'pad', 'interpolate', None}
Method to use for filling holes in the new index
Returns
-------
TimeSeries
"""
if isinstance(freq, datetools.DateOffset):
dateRange = DateRange(self.index[0], self.index[-1], offset=freq)
else:
dateRange = DateRange(self.index[0], self.index[-1], timeRule=freq)
return self.reindex(dateRange, fillMethod=fillMethod)
def interpolate(self, method='linear'):
"""
Interpolate missing values (after the first valid value)
Parameters
----------
method : {'linear', 'time'}
Interpolation method.
Time interpolation uses the actual dates as the interpolation
axis, so unevenly spaced observations are weighted by interval length
Returns
-------
Series with values interpolated
"""
if method == 'time':
if not isinstance(self, TimeSeries):
raise Exception('time-weighted interpolation only works'
'on TimeSeries')
inds = np.array([d.toordinal() for d in self.index])
else:
inds = np.arange(len(self))
values = self.values
invalid = isnull(values)
valid = -invalid
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
inds = inds[firstIndex:]
result = values.copy()
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
values[firstIndex:][valid])
return Series(result, index=self.index)
def rename(self, mapper):
"""
Alter Series index using dict or function
Parameters
----------
mapper : dict-like or function
Transformation to apply to each index
Notes
-----
Function / dict values must be unique (1-to-1)
Returns
-------
y : Series (new object)
"""
if isinstance(mapper, (dict, Series)):
mapper = mapper.__getitem__
result = self.copy()
result.index = [mapper(x) for x in self.index]
return result
@property
def weekday(self):
return Series([d.weekday() for d in self.index],
index=self.index)
class TimeSeries(Series):
pass
#-------------------------------------------------------------------------------
# Supplementary functions
def remove_na(arr):
"""
Return array containing only true/non-NaN values, possibly empty.
"""
return arr[notnull(arr)]
def _seriesRepr(index, vals, nanRep='NaN'):
string_index = [str(x) for x in index]
maxlen = max(len(x) for x in string_index)
padSpace = min(maxlen, 60)
if vals.dtype == np.object_:
def _format(k, v):
return '%s %s' % (str(k).ljust(padSpace), v)
elif vals.dtype == np.float_:
def _format(k, v):
if np.isnan(v):
v = nanRep
else:
v = str(v)
return '%s %s' % (str(k).ljust(padSpace), v)
else:
def _format(k, v):
return '%s %s' % (str(k).ljust(padSpace), v)
it = itertools.starmap(_format,
itertools.izip(string_index, vals))
return '\n'.join(it)
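# Illustrative sketch (hypothetical helper, added for clarity): a short
# end-to-end tour of reindex / fill / shift as defined above, using a plain
# string index so no date machinery is required.
def _example_series_usage():
    s = Series([1., 2., 3.], index=['a', 'b', 'c'])
    wider = s.reindex(['a', 'b', 'c', 'd'])   # 'd' becomes NaN
    filled = wider.fill(value=0.0)            # NaN replaced by 0.0
    shifted = s.shift(1)                      # values move down one slot
    return wider, filled, shifted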
| bsd-3-clause |
harshaneelhg/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formulas
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formulas
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y x n_features big
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
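# Illustrative sketch (hypothetical helper, mirroring the assertions above):
# the quality of the Nystroem approximation improves as n_components grows.
# Uses only names already defined at module level (X, rbf_kernel, Nystroem, np).
def _example_nystroem_quality():
    exact = rbf_kernel(X, gamma=1.0)
    errors = []
    for n_components in (10, 50, 150):
        mapped = Nystroem(kernel='rbf', gamma=1.0, n_components=n_components,
                          random_state=0).fit_transform(X)
        errors.append(np.abs(exact - np.dot(mapped, mapped.T)).mean())
    return errors   # mean absolute error shrinks as n_components increases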
| bsd-3-clause |
ankurankan/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
mxamin/coursera | coursera/api.py | 1 | 51527 | # vim: set fileencoding=utf8 :
"""
This module contains implementations of different APIs that are used by the
downloader.
"""
import os
import json
import base64
import logging
import time
import requests
import urllib
from collections import namedtuple
from six import iterkeys, iteritems
from six.moves.urllib_parse import quote_plus
from .utils import (BeautifulSoup, make_coursera_absolute_url,
extend_supplement_links, clean_url, clean_filename,
is_debug_run, unescape_html)
from .network import get_reply, get_page, post_page_and_reply
from .define import (OPENCOURSE_SUPPLEMENT_URL,
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
OPENCOURSE_ASSET_URL,
OPENCOURSE_ASSETS_URL,
OPENCOURSE_API_ASSETS_V1_URL,
OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
OPENCOURSE_VIDEO_URL,
OPENCOURSE_MEMBERSHIPS,
OPENCOURSE_REFERENCES_POLL_URL,
OPENCOURSE_REFERENCE_ITEM_URL,
OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL,
# New feature, Notebook (Python Jupyter)
OPENCOURSE_NOTEBOOK_DESCRIPTIONS,
OPENCOURSE_NOTEBOOK_LAUNCHES,
OPENCOURSE_NOTEBOOK_TREE,
OPENCOURSE_NOTEBOOK_DOWNLOAD,
POST_OPENCOURSE_API_QUIZ_SESSION,
POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS_GET_STATE,
INSTRUCTIONS_HTML_INJECTION_PRE,
INSTRUCTIONS_HTML_MATHJAX_URL,
INSTRUCTIONS_HTML_INJECTION_AFTER,
IN_MEMORY_EXTENSION,
IN_MEMORY_MARKER)
from .cookies import prepape_auth_headers
class QuizExamToMarkupConverter(object):
"""
Converts quiz/exam JSON into semi HTML (Coursera Markup) for local viewing.
The output needs to be further processed by MarkupToHTMLConverter.
"""
KNOWN_QUESTION_TYPES = ('mcq',
'mcqReflect',
'checkbox',
'singleNumeric',
'textExactMatch',
'mathExpression',
'regex',
'reflect')
# TODO: support live MathJAX preview rendering for mathExpression
# and regex question types
KNOWN_INPUT_TYPES = ('textExactMatch',
'singleNumeric',
'mathExpression',
'regex',
'reflect')
def __init__(self, session):
self._session = session
def __call__(self, quiz_or_exam_json):
result = []
for question_index, question_json in enumerate(quiz_or_exam_json['questions']):
question_type = question_json['question']['type']
if question_type not in self.KNOWN_QUESTION_TYPES:
logging.info('Unknown question type: %s', question_type)
logging.info('Question json: %s', question_json)
logging.info('Please report class name, quiz name and the data'
' above to coursera-dl authors')
prompt = question_json['variant']['definition']['prompt']
options = question_json['variant']['definition'].get('options', [])
# Question number
result.append('<h3>Question %d</h3>' % (question_index + 1))
# Question text
question_text = unescape_html(prompt['definition']['value'])
result.append(question_text)
# Input for answer
if question_type in self.KNOWN_INPUT_TYPES:
result.extend(self._generate_input_field())
# Convert input_type from JSON reply to HTML input type
input_type = {
'mcq': 'radio',
'mcqReflect': 'radio',
'checkbox': 'checkbox'
}.get(question_type, '')
# Convert options, they are either checkboxes or radio buttons
result.extend(self._convert_options(
question_index, options, input_type))
result.append('<hr>')
return '\n'.join(result)
def _convert_options(self, question_index, options, input_type):
if not options:
return []
result = ['<form>']
for option in options:
option_text = unescape_html(option['display']['definition']['value'])
# We need to replace <text> with <span> so that answer text
# stays on the same line with checkbox/radio button
option_text = self._replace_tag(option_text, 'text', 'span')
result.append('<label><input type="%s" name="%s">'
'%s<br></label>' % (
input_type, question_index, option_text))
result.append('</form>')
return result
def _replace_tag(self, text, initial_tag, target_tag):
soup = BeautifulSoup(text)
while soup.find(initial_tag):
soup.find(initial_tag).name = target_tag
return soup.prettify()
def _generate_input_field(self):
return ['<form><label>Enter answer here:<input type="text" '
'name=""><br></label></form>']
class MarkupToHTMLConverter(object):
def __init__(self, session, mathjax_cdn_url=None):
self._session = session
self._asset_retriever = AssetRetriever(session)
if not mathjax_cdn_url:
mathjax_cdn_url = INSTRUCTIONS_HTML_MATHJAX_URL
self._mathjax_cdn_url = mathjax_cdn_url
def __call__(self, markup):
"""
Convert instructions markup to make it more suitable for
offline reading.
@param markup: HTML (kinda) markup to prettify.
@type markup: str
@return: Prettified HTML with several markup tags replaced with HTML
equivalents.
@rtype: str
"""
soup = BeautifulSoup(markup)
self._convert_markup_basic(soup)
self._convert_markup_images(soup)
self._convert_markup_audios(soup)
return soup.prettify()
def _convert_markup_basic(self, soup):
"""
Perform basic conversion of instructions markup. This includes
replacement of several textual markup tags with their HTML equivalents.
@param soup: BeautifulSoup instance.
@type soup: BeautifulSoup
"""
# Inject meta charset tag
meta = soup.new_tag('meta', charset='UTF-8')
soup.insert(0, meta)
# 1. Inject basic CSS style
css = "".join([
INSTRUCTIONS_HTML_INJECTION_PRE,
self._mathjax_cdn_url,
INSTRUCTIONS_HTML_INJECTION_AFTER])
css_soup = BeautifulSoup(css)
soup.append(css_soup)
# 2. Replace <text> with <p>
while soup.find('text'):
soup.find('text').name = 'p'
# 3. Replace <heading level="1"> with <h1>
while soup.find('heading'):
heading = soup.find('heading')
heading.name = 'h%s' % heading.attrs.get('level', '1')
# 4. Replace <code> with <pre>
while soup.find('code'):
soup.find('code').name = 'pre'
# 5. Replace <list> with <ol> or <ul>
while soup.find('list'):
list_ = soup.find('list')
type_ = list_.attrs.get('bullettype', 'numbers')
list_.name = 'ol' if type_ == 'numbers' else 'ul'
def _convert_markup_images(self, soup):
"""
Convert images of instructions markup. Images are downloaded,
base64-encoded and inserted into <img> tags.
@param soup: BeautifulSoup instance.
@type soup: BeautifulSoup
"""
# 6. Replace <img> assets with actual image contents
images = [image for image in soup.find_all('img')
if image.attrs.get('assetid') is not None]
if not images:
return
# Get assetid attribute from all images
asset_ids = [image.attrs.get('assetid') for image in images]
self._asset_retriever(asset_ids)
for image in images:
# Encode each image using base64
asset = self._asset_retriever[image['assetid']]
if asset.data is not None:
encoded64 = base64.b64encode(asset.data).decode()
image['src'] = 'data:%s;base64,%s' % (asset.content_type, encoded64)
def _convert_markup_audios(self, soup):
"""
Convert audio assets of instructions markup. Audio files are downloaded,
base64-encoded and inserted as <audio controls> tags with <source> children.
@param soup: BeautifulSoup instance.
@type soup: BeautifulSoup
"""
# 7. Replace <asset> audio assets with actual audio contents
audios = [audio for audio in soup.find_all('asset')
if audio.attrs.get('id') is not None
and audio.attrs.get('assettype') == 'audio']
if not audios:
return
# Get assetid attribute from all audios
asset_ids = [audio.attrs.get('id') for audio in audios]
self._asset_retriever(asset_ids)
for audio in audios:
# Encode each audio using base64
asset = self._asset_retriever[audio['id']]
if asset.data is not None:
encoded64 = base64.b64encode(asset.data).decode()
data_string = 'data:%s;base64,%s' % (asset.content_type, encoded64)
source_tag = soup.new_tag('source', src=data_string, type=asset.content_type)
controls_tag = soup.new_tag('audio', controls="")
controls_tag.string = 'Your browser does not support the audio element.'
controls_tag.append(source_tag)
audio.insert_after(controls_tag)
class OnDemandCourseMaterialItems(object):
"""
Helper class that allows accessing lecture JSONs by lesson IDs.
"""
def __init__(self, items):
"""
Initialization. Build a map from lessonId to Lecture (item)
@param items: linked.OnDemandCourseMaterialItems key of
OPENCOURSE_ONDEMAND_COURSE_MATERIALS response.
@type items: dict
"""
# Build a map of lessonId => Item
self._items = dict((item['lessonId'], item) for item in items)
@staticmethod
def create(session, course_name):
"""
Create an instance using a session and a course_name.
@param session: Requests session.
@type session: requests.Session
@param course_name: Course name (slug) from course json.
@type course_name: str
@return: Instance of OnDemandCourseMaterialItems
@rtype: OnDemandCourseMaterialItems
"""
dom = get_page(session, OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
json=True,
class_name=course_name)
return OnDemandCourseMaterialItems(
dom['linked']['onDemandCourseMaterialItems.v1'])
def get(self, lesson_id):
"""
Return lecture by lesson ID.
@param lesson_id: Lesson ID.
@type lesson_id: str
@return: Lesson JSON.
@rtype: dict
Example:
{
"id": "AUd0k",
"moduleId": "0MGvs",
"lessonId": "QgCuM",
"name": "Programming Assignment 1: Decomposition of Graphs",
"slug": "programming-assignment-1-decomposition-of-graphs",
"timeCommitment": 10800000,
"content": {
"typeName": "gradedProgramming",
"definition": {
"programmingAssignmentId": "zHzR5yhHEeaE0BKOcl4zJQ@2",
"gradingWeight": 20
}
},
"isLocked": true,
"itemLockedReasonCode": "PREMIUM",
"trackId": "core"
},
"""
return self._items.get(lesson_id)
class Asset(namedtuple('Asset', 'id name type_name url content_type data')):
"""
This class contains information about an asset.
"""
__slots__ = ()
def __repr__(self):
return 'Asset(id="%s", name="%s", type_name="%s", url="%s", content_type="%s", data="<...>")' % (
self.id, self.name, self.type_name, self.url, self.content_type)
class AssetRetriever(object):
"""
This class helps download assets by their ID.
"""
def __init__(self, session):
self._session = session
self._asset_mapping = {}
def __getitem__(self, asset_id):
return self._asset_mapping[asset_id]
def __call__(self, asset_ids, download=True):
result = []
# Download information about assets (by IDs)
asset_list = get_page(self._session, OPENCOURSE_API_ASSETS_V1_URL,
json=True,
id=','.join(asset_ids))
# Create a map "asset_id => asset" for easier access
asset_map = dict((asset['id'], asset) for asset in asset_list['elements'])
for asset_id in asset_ids:
# Download each asset
asset_dict = asset_map[asset_id]
url = asset_dict['url']['url'].strip()
data, content_type = None, None
if download:
reply = get_reply(self._session, url)
if reply.status_code == 200:
data = reply.content
content_type = reply.headers.get('Content-Type')
asset = Asset(id=asset_dict['id'].strip(),
name=asset_dict['name'].strip(),
type_name=asset_dict['typeName'].strip(),
url=url,
content_type=content_type,
data=data)
self._asset_mapping[asset.id] = asset
result.append(asset)
return result
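# --- Editorial usage sketch (not part of the original module) ---
# AssetRetriever is used first as a callable and then as a mapping. A hedged
# example, assuming `session` is an authenticated requests.Session and the
# asset ids below are placeholders:
#
#     retriever = AssetRetriever(session)
#     assets = retriever(['asset-id-1', 'asset-id-2'], download=False)
#     print(retriever['asset-id-1'].url)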
class CourseraOnDemand(object):
"""
This is a class that provides a friendly interface to extract certain
parts of on-demand courses. On-demand is a newer course format that Coursera
uses; such courses contain '/learn/' in their URLs. This class does not support
old-style Coursera classes. This API is by no means complete.
"""
def __init__(self, session, course_id, course_name,
unrestricted_filenames=False,
mathjax_cdn_url=None):
"""
Initialize Coursera OnDemand API.
@param session: Current session that holds cookies and so on.
@type session: requests.Session
@param course_id: Course ID from course json.
@type course_id: str
@param unrestricted_filenames: Flag that indicates whether grabbed
file names should endure stricter character filtering. @see
`clean_filename` for the details.
@type unrestricted_filenames: bool
"""
self._session = session
self._notebook_cookies = None
self._course_id = course_id
self._course_name = course_name
self._unrestricted_filenames = unrestricted_filenames
self._user_id = None
self._quiz_to_markup = QuizExamToMarkupConverter(session)
self._markup_to_html = MarkupToHTMLConverter(session, mathjax_cdn_url=mathjax_cdn_url)
self._asset_retriever = AssetRetriever(session)
def obtain_user_id(self):
reply = get_page(self._session, OPENCOURSE_MEMBERSHIPS, json=True)
elements = reply['elements']
user_id = elements[0]['userId'] if elements else None
self._user_id = user_id
def list_courses(self):
"""
List enrolled courses.
@return: List of enrolled courses.
@rtype: [str]
"""
reply = get_page(self._session, OPENCOURSE_MEMBERSHIPS, json=True)
course_list = reply['linked']['courses.v1']
slugs = [element['slug'] for element in course_list]
return slugs
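# --- Editorial usage sketch (not part of the original module) ---
# A rough, hedged illustration of how CourseraOnDemand is driven; the course
# id, course slug and video id below are placeholders, and the authenticated
# session is assumed to come from the rest of coursera-dl:
#
#     course = CourseraOnDemand(session, 'COURSE_ID', 'course-slug')
#     course.obtain_user_id()
#     links = course.extract_links_from_lecture('VIDEO_ID', subtitle_language='en')
#     # `links` maps file extensions to lists of (url, title) tuples,
#     # or is None if the download failed.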
def extract_links_from_exam(self, exam_id):
try:
session_id = self._get_exam_session_id(exam_id)
exam_json = self._get_exam_json(exam_id, session_id)
return self._convert_quiz_json_to_links(exam_json, 'exam')
except requests.exceptions.HTTPError as exception:
logging.error('Could not download exam %s: %s', exam_id, exception)
if is_debug_run():
logging.exception('Could not download exam %s: %s', exam_id, exception)
return None
def _get_notebook_folder(self, url, jupyterId, **kwargs):
supplement_links = {}
url = url.format(**kwargs)
reply = get_page(
self._session,
url,
json=True
)
headers = self._auth_headers_with_json()
for content in reply['content']:
if content['type'] == 'directory':
a = self._get_notebook_folder(OPENCOURSE_NOTEBOOK_TREE, jupyterId, jupId=jupyterId, path=content['path'], timestamp=int(time.time()))
supplement_links.update(a)
elif content['type'] == 'file':
tmpUrl = OPENCOURSE_NOTEBOOK_DOWNLOAD.format(path=content['path'], jupId=jupyterId, timestamp=int(time.time()))
filename, extension = os.path.splitext(clean_url(tmpUrl))
head, tail = os.path.split(content['path'])
if not os.path.isdir(self._course_name + "/notebook/" + head + "/"):
logging.info('Creating [{}] directories...'.format(head))
os.makedirs(self._course_name + "/notebook/" + head + "/")
r = requests.get(tmpUrl.replace(" ", "%20"), cookies=self._session.cookies)
if not os.path.exists(self._course_name + "/notebook/" + head + "/" + tail):
logging.info('Downloading {} into {}'.format(tail, head))
with open(self._course_name + "/notebook/" + head + "/" + tail, 'wb+') as f:
f.write(r.content)
else:
logging.info('Skipping {}... (file exists)'.format(tail))
if str(extension[1:]) not in supplement_links:
supplement_links[str(extension[1:])] = []
supplement_links[str(extension[1:])].append((tmpUrl.replace(" ", "%20"), filename))
elif content['type'] == 'notebook':
tmpUrl = OPENCOURSE_NOTEBOOK_DOWNLOAD.format(path=content['path'], jupId=jupyterId, timestamp=int(time.time()))
filename, extension = os.path.splitext(clean_url(tmpUrl))
head, tail = os.path.split(content['path'])
if not os.path.isdir(self._course_name + "/notebook/" + head + "/"):
logging.info('Creating [{}] directories...'.format(head))
os.makedirs(self._course_name + "/notebook/" + head + "/")
r = requests.get(tmpUrl.replace(" ", "%20"), cookies=self._session.cookies)
if not os.path.exists(self._course_name + "/notebook/" + head + "/" + tail):
logging.info('Downloading Jupyter {} into {}'.format(tail, head))
with open(self._course_name + "/notebook/" + head + "/" + tail, 'wb+') as f:
f.write(r.content)
else:
logging.info('Skipping {}... (file exists)'.format(tail))
if "ipynb" not in supplement_links:
supplement_links["ipynb"] = []
supplement_links["ipynb"].append((tmpUrl.replace(" ", "%20"), filename))
else:
logging.info('Unsupported typename {} in notebook'.format(content['type']))
return supplement_links
def _get_notebook_json(self, notebook_id, authorizationId):
import re, time
headers = self._auth_headers_with_json()
reply = get_page(
self._session,
OPENCOURSE_NOTEBOOK_DESCRIPTIONS,
json=False,
authId=authorizationId,
headers=headers
)
jupyterId = re.findall(r"\"\/user\/(.*)\/tree\"", reply)
if len(jupyterId) == 0:
logging.error('Could not download notebook %s', notebook_id)
return None
jupyterId = jupyterId[0]
newReq = requests.Session()
req = newReq.get(OPENCOURSE_NOTEBOOK_TREE.format(jupId=jupyterId, path="/", timestamp=int(time.time())), headers=headers)
return self._get_notebook_folder(OPENCOURSE_NOTEBOOK_TREE, jupyterId, jupId=jupyterId, path="/", timestamp=int(time.time()))
def extract_links_from_notebook(self, notebook_id):
try:
authorizationId = self._extract_notebook_text(notebook_id)
ret = self._get_notebook_json(notebook_id, authorizationId)
return ret
except requests.exceptions.HTTPError as exception:
logging.error('Could not download notebook %s: %s', notebook_id, exception)
if is_debug_run():
logging.exception('Could not download notebook %s: %s', notebook_id, exception)
return None
def extract_links_from_quiz(self, quiz_id):
try:
session_id = self._get_quiz_session_id(quiz_id)
quiz_json = self._get_quiz_json(quiz_id, session_id)
return self._convert_quiz_json_to_links(quiz_json, 'quiz')
except requests.exceptions.HTTPError as exception:
logging.error('Could not download quiz %s: %s', quiz_id, exception)
if is_debug_run():
logging.exception('Could not download quiz %s: %s', quiz_id, exception)
return None
def _convert_quiz_json_to_links(self, quiz_json, filename_suffix):
markup = self._quiz_to_markup(quiz_json)
html = self._markup_to_html(markup)
supplement_links = {}
instructions = (IN_MEMORY_MARKER + html, filename_suffix)
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
def _get_exam_json(self, exam_id, session_id):
headers = self._auth_headers_with_json()
data = {"name": "getState", "argument": []}
reply = get_page(self._session,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS_GET_STATE,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
session_id=session_id)
return reply['elements'][0]['result']
def _get_exam_session_id(self, exam_id):
headers = self._auth_headers_with_json()
data = {'courseId': self._course_id, 'itemId': exam_id}
_body, reply = post_page_and_reply(self._session,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS,
data=json.dumps(data),
headers=headers)
return reply.headers.get('X-Coursera-Id')
def _get_quiz_json(self, quiz_id, session_id):
headers = self._auth_headers_with_json()
data = {"contentRequestBody": {"argument": []}}
reply = get_page(self._session,
POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
user_id=self._user_id,
class_name=self._course_name,
quiz_id=quiz_id,
session_id=session_id)
return reply['contentResponseBody']['return']
def _get_quiz_session_id(self, quiz_id):
headers = self._auth_headers_with_json()
data = {"contentRequestBody":[]}
reply = get_page(self._session,
POST_OPENCOURSE_API_QUIZ_SESSION,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
user_id=self._user_id,
class_name=self._course_name,
quiz_id=quiz_id)
return reply['contentResponseBody']['session']['id']
def _auth_headers_with_json(self):
headers = prepape_auth_headers(self._session, include_cauth=True)
headers.update({
'Content-Type': 'application/json; charset=UTF-8'
})
return headers
def extract_links_from_lecture(self,
video_id, subtitle_language='en',
resolution='540p', assets=None):
"""
Return the download URLs of on-demand course video.
@param video_id: Video ID.
@type video_id: str
@param subtitle_language: Subtitle language.
@type subtitle_language: str
@param resolution: Preferred video resolution.
@type resolution: str
@param assets: List of assets that may present in the video.
@type assets: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
if assets is None:
assets = []
try:
links = self._extract_videos_and_subtitles_from_lecture(
video_id, subtitle_language, resolution)
assets = self._normalize_assets(assets)
extend_supplement_links(
links, self._extract_links_from_lecture_assets(assets))
return links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download lecture %s: %s', video_id, exception)
if is_debug_run():
logging.exception('Could not download lecture %s: %s', video_id, exception)
return None
def _normalize_assets(self, assets):
"""
Perform asset normalization. For some reason, assets that are sometimes
present in lectures have "@1" at the end of their id. When such an "uncut"
asset id is fed to OPENCOURSE_ASSETS_URL, the request fails with an error
that says: "Routing error: 'get-all' not implemented". To avoid that, the
last two characters are cut off the asset id, after which the request works
fine. The Web UI appears to do the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str]
"""
new_assets = []
for asset in assets:
# For example: giAxucdaEeWJTQ5WTi8YJQ@1
if len(asset) == 24:
# Turn it into: giAxucdaEeWJTQ5WTi8YJQ
asset = asset[:-2]
new_assets.append(asset)
return new_assets
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(clean_url(name))
if extension == '':
return
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
url = url.strip()
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links
def _get_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method may internally
use AssetRetriever to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>',
'url': '<url>'
}]
"""
dom = get_page(self._session, OPENCOURSE_ASSETS_URL,
json=True, id=asset_id)
logging.debug('Parsing JSON for asset_id <%s>.', asset_id)
urls = []
for element in dom['elements']:
typeName = element['typeName']
definition = element['definition']
# Elements of `asset` types look as follows:
#
# {'elements': [{'definition': {'assetId': 'gtSfvscoEeW7RxKvROGwrw',
# 'name': 'Презентация к лекции'},
# 'id': 'phxNlMcoEeWXCQ4nGuQJXw',
# 'typeName': 'asset'}],
# 'linked': None,
# 'paging': None}
#
if typeName == 'asset':
open_course_asset_id = definition['assetId']
for asset in self._asset_retriever([open_course_asset_id],
download=False):
urls.append({'name': asset.name, 'url': asset.url})
# Elements of `url` types look as follows:
#
# {'elements': [{'definition': {'name': 'What motivates you.pptx',
# 'url': 'https://d396qusza40orc.cloudfront.net/learning/Powerpoints/2-4A_What_motivates_you.pptx'},
# 'id': '0hixqpWJEeWQkg5xdHApow',
# 'typeName': 'url'}],
# 'linked': None,
# 'paging': None}
#
elif typeName == 'url':
urls.append({'name': definition['name'].strip(),
'url': definition['url'].strip()})
else:
logging.warning(
'Unknown asset typeName: %s\ndom: %s\n'
'If you think the downloader missed some '
'files, please report the issue here:\n'
'https://github.com/coursera-dl/coursera-dl/issues/new',
typeName, json.dumps(dom, indent=4))
return urls
def _extract_videos_and_subtitles_from_lecture(self,
video_id,
subtitle_language='en',
resolution='540p'):
dom = get_page(self._session, OPENCOURSE_VIDEO_URL,
json=True,
video_id=video_id)
logging.debug('Parsing JSON for video_id <%s>.', video_id)
video_content = {}
# videos
logging.debug('Gathering video URLs for video_id <%s>.', video_id)
sources = dom['sources']
sources.sort(key=lambda src: src['resolution'])
sources.reverse()
# Try to select resolution requested by the user.
filtered_sources = [source
for source in sources
if source['resolution'] == resolution]
if len(filtered_sources) == 0:
# We will just use the 'vanilla' version of sources here, instead of
# filtered_sources.
logging.warning('Requested resolution %s not available for <%s>. '
'Downloading highest resolution available instead.',
resolution, video_id)
else:
logging.debug('Proceeding with download of resolution %s of <%s>.',
resolution, video_id)
sources = filtered_sources
video_url = sources[0]['formatSources']['video/mp4']
video_content['mp4'] = video_url
subtitle_link = self._extract_subtitles_from_video_dom(
dom, subtitle_language, video_id)
for key, value in iteritems(subtitle_link):
video_content[key] = value
lecture_video_content = {}
for key, value in iteritems(video_content):
lecture_video_content[key] = [(value, '')]
return lecture_video_content
def _extract_subtitles_from_video_dom(self, video_dom,
subtitle_language, video_id):
# subtitles and transcripts
subtitle_nodes = [
('subtitles', 'srt', 'subtitle'),
('subtitlesTxt', 'txt', 'transcript'),
]
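# Editorial note: `subtitle_language` is parsed as a comma-separated list of
# preferences, each of which may chain fallback languages with "|", and the
# special value "all" selects every available language. For example, a
# hypothetical value "en|en-GB,zh-CN" requests English (falling back to
# en-GB when plain "en" is missing) plus Simplified Chinese.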
subtitle_set_download = set()
subtitle_set_nonexist = set()
subtitle_links = {}
for (subtitle_node, subtitle_extension, subtitle_description) \
in subtitle_nodes:
logging.debug('Gathering %s URLs for video_id <%s>.',
subtitle_description, video_id)
subtitles = video_dom.get(subtitle_node)
download_all_subtitle = False
if subtitles is not None:
subtitles_set = set(subtitles)
requested_subtitle_list = [s.strip() for s in
subtitle_language.split(",")]
for language_with_alts in requested_subtitle_list:
if download_all_subtitle:
break
grouped_language_list = [l.strip() for l in
language_with_alts.split("|")]
for language in grouped_language_list:
if language == "all":
download_all_subtitle = True
break
elif language in subtitles_set:
subtitle_set_download.update([language])
break
else:
subtitle_set_nonexist.update([language])
if download_all_subtitle and subtitles is not None:
subtitle_set_download = set(subtitles)
if not download_all_subtitle and subtitle_set_nonexist:
logging.warning("%s unavailable in '%s' language for video "
"with video id: [%s],"
"%s", subtitle_description.capitalize(),
", ".join(subtitle_set_nonexist), video_id,
subtitle_description)
if not subtitle_set_download:
logging.warning("%s: all requested subtitles are unavailable "
"for video with video id: [%s], falling back to 'en' "
"%s", subtitle_description.capitalize(),
video_id,
subtitle_description)
subtitle_set_download = set(['en'])
for current_subtitle_language in subtitle_set_download:
subtitle_url = subtitles.get(current_subtitle_language)
if subtitle_url is not None:
# some subtitle urls are relative!
subtitle_links[
"%s.%s" % (current_subtitle_language, subtitle_extension)
] = make_coursera_absolute_url(subtitle_url)
return subtitle_links
def extract_links_from_programming_immediate_instructions(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Extracting links from programming immediate '
'instructions for element_id <%s>.', element_id)
try:
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(
self._extract_programming_immediate_instructions_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
instructions = (IN_MEMORY_MARKER + self._markup_to_html(text),
'instructions')
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download programming assignment %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download programming assignment %s: %s',
element_id, exception)
return None
def extract_links_from_programming(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Gathering supplement URLs for element_id <%s>.', element_id)
try:
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(self._extract_assignment_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
instructions = (IN_MEMORY_MARKER + self._markup_to_html(text),
'instructions')
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download programming assignment %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download programming assignment %s: %s',
element_id, exception)
return None
def extract_links_from_supplement(self, element_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Gathering supplement URLs for element_id <%s>.', element_id)
try:
dom = get_page(self._session, OPENCOURSE_SUPPLEMENT_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
supplement_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
supplement_content, self._extract_links_from_text(value))
instructions = (IN_MEMORY_MARKER + self._markup_to_html(value),
'instructions')
extend_supplement_links(
supplement_content, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_content
except requests.exceptions.HTTPError as exception:
logging.error('Could not download supplement %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download supplement %s: %s',
element_id, exception)
return None
def _extract_asset_tags(self, text):
"""
Extract asset tags from text into a convenient form.
@param text: Text to extract asset tags from. This text contains HTML
code that is parsed by BeautifulSoup.
@type text: str
@return: Asset map.
@rtype: {
'<id>': {
'name': '<name>',
'extension': '<extension>'
},
...
}
"""
soup = BeautifulSoup(text)
asset_tags_map = {}
for asset in soup.find_all('asset'):
asset_tags_map[asset['id']] = {'name': asset['name'],
'extension': asset['extension']}
return asset_tags_map
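# Editorial example (hypothetical input): for a fragment such as
# '<asset id="abc" name="slides" extension="pdf"></asset>' the method above
# returns {'abc': {'name': 'slides', 'extension': 'pdf'}}.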
def _extract_asset_urls(self, asset_ids):
"""
Extract asset URLs along with asset ids.
@param asset_ids: List of ids to get URLs for.
@type asset_ids: [str]
@return: List of dictionaries with asset URLs and ids.
@rtype: [{
'id': '<id>',
'url': '<url>'
}]
"""
dom = get_page(self._session, OPENCOURSE_ASSET_URL,
json=True,
ids=quote_plus(','.join(asset_ids)))
return [{'id': element['id'],
'url': element['url'].strip()}
for element in dom['elements']]
def extract_references_poll(self):
try:
dom = get_page(self._session,
OPENCOURSE_REFERENCES_POLL_URL.format(
course_id=self._course_id),
json=True
)
logging.info('Downloaded resource poll (%d bytes)', len(dom))
return dom['elements']
except requests.exceptions.HTTPError as exception:
logging.error('Could not download resource section: %s',
exception)
if is_debug_run():
logging.exception('Could not download resource section: %s',
exception)
return None
def extract_links_from_reference(self, short_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Gathering resource URLs for short_id <%s>.', short_id)
try:
dom = get_page(self._session, OPENCOURSE_REFERENCE_ITEM_URL,
json=True,
course_id=self._course_id,
short_id=short_id)
resource_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
resource_content, self._extract_links_from_text(value))
instructions = (IN_MEMORY_MARKER + self._markup_to_html(value),
'resources')
extend_supplement_links(
resource_content, {IN_MEMORY_EXTENSION: [instructions]})
return resource_content
except requests.exceptions.HTTPError as exception:
logging.error('Could not download supplement %s: %s',
short_id, exception)
if is_debug_run():
logging.exception('Could not download supplement %s: %s',
short_id, exception)
return None
def _extract_programming_immediate_instructions_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_notebook_text(self, element_id):
"""
Extract notebook text (instructions).
@param element_id: Element id to extract notebook links.
@type element_id: str
@return: Notebook URL.
@rtype: [str]
"""
headers = self._auth_headers_with_json()
data = {'courseId': self._course_id, 'learnerId': self._user_id, 'itemId': element_id}
dom = get_page(self._session, OPENCOURSE_NOTEBOOK_LAUNCHES,
post=True,
json=True,
user_id=self._user_id,
course_id=self._course_id,
headers=headers,
element_id=element_id,
data=json.dumps(data)
)
# Return authorization id. This id changes on each request
return dom['elements'][0]['authorizationId']
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_links_from_text(self, text):
"""
Extract supplement links from the html text. Links may be provided
in two ways:
1. <a> tags with href attribute
2. <asset> tags with id attribute (requires additional request
to get the direct URL to the asset file)
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
('<link2>', '<title2>')
],
'<extension2>': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
],
...
}
"""
supplement_links = self._extract_links_from_a_tags_in_text(text)
extend_supplement_links(
supplement_links,
self._extract_links_from_asset_tags_in_text(text))
return supplement_links
def _extract_links_from_asset_tags_in_text(self, text):
"""
Scan the text and extract asset tags and links to corresponding
files.
@param text: Page text.
@type text: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
# Extract asset tags from instructions text
asset_tags_map = self._extract_asset_tags(text)
ids = list(iterkeys(asset_tags_map))
if not ids:
return {}
# asset tags contain asset names and ids. We need to make another
# HTTP request to get asset URL.
asset_urls = self._extract_asset_urls(ids)
supplement_links = {}
# Build supplement links, providing nice titles along the way
for asset in asset_urls:
title = clean_filename(
asset_tags_map[asset['id']]['name'],
self._unrestricted_filenames)
extension = clean_filename(
asset_tags_map[asset['id']]['extension'].strip(),
self._unrestricted_filenames)
url = asset['url'].strip()
if extension not in supplement_links:
supplement_links[extension] = []
supplement_links[extension].append((url, title))
return supplement_links
def _extract_links_from_a_tags_in_text(self, text):
"""
Extract supplement links from the html text that contains <a> tags
with href attribute.
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
('<link2>', '<title2>')
],
'<extension2>': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
]
}
"""
soup = BeautifulSoup(text)
links = [item['href'].strip()
for item in soup.find_all('a') if 'href' in item.attrs]
links = sorted(list(set(links)))
supplement_links = {}
for link in links:
filename, extension = os.path.splitext(clean_url(link))
# Some courses put links to sites in supplement section, e.g.:
# http://pandas.pydata.org/
if extension == '':
continue
# Make lowercase and cut the leading/trailing dot
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
if extension not in supplement_links:
supplement_links[extension] = []
# Putting basename into the second slot of the tuple is important
# because that allows downloading many supplements within a
# single lecture, e.g.:
# 01_slides-presented-in-this-module.pdf
# 01_slides-presented-in-this-module_Dalal-cvpr05.pdf
# 01_slides-presented-in-this-module_LM-3dtexton.pdf
supplement_links[extension].append((link, basename))
return supplement_links
| lgpl-3.0 |
minixalpha/spark | python/pyspark/serializers.py | 4 | 21331 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of the objects, and it is also configurable via SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class PythonEvalType(object):
NON_UDF = 0
SQL_BATCHED_UDF = 1
SQL_PANDAS_UDF = 2
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
if the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class ArrowSerializer(FramedSerializer):
"""
Serializes bytes as Arrow data with the Arrow file format.
"""
def dumps(self, batch):
import pyarrow as pa
import io
sink = io.BytesIO()
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
return sink.getvalue()
def loads(self, obj):
import pyarrow as pa
reader = pa.RecordBatchFileReader(pa.BufferReader(obj))
return reader.read_all()
def __repr__(self):
return "ArrowSerializer"
def _create_batch(series):
import pyarrow as pa
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
# If a nullable integer series has been promoted to floating point with NaNs, need to cast
# NOTE: this is not necessary with Arrow >= 0.7
def cast_series(s, t):
if t is None or s.dtype == t.to_pandas_dtype():
return s
else:
return s.fillna(0).astype(t.to_pandas_dtype(), copy=False)
arrs = [pa.Array.from_pandas(cast_series(s, t), mask=s.isnull(), type=t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
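# Editorial note: _create_batch() accepts a single pandas.Series, a
# (series, pyarrow type) pair, or a list mixing both forms, and emits one
# Arrow RecordBatch with autogenerated column names _0, _1, ...  A hedged
# example (illustrative data only):
#
#     import pandas as pd
#     import pyarrow as pa
#     batch = _create_batch([(pd.Series([1, 2]), pa.int32()),
#                            pd.Series([0.5, 1.5])])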
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
table = pa.Table.from_batches([batch])
yield [c.to_pandas() for c in table.itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of lists of pairs, splitting lists of values that
contain more than a certain number of objects so that the resulting
batches have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
Choose the batch size automatically based on the size of the objects
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
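# Editorial note: the loop above tunes the batch size adaptively. It starts
# with one object per batch, doubles the batch while serialized batches stay
# below `bestSize` (64 KiB by default), and halves it once a batch grows past
# ten times that target, so batch sizes settle around roughly `bestSize`
# bytes without inspecting individual objects.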
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian;
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip;
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hook namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
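# Editorial note: the two helpers above make namedtuple *instances* serialize
# by value -- __reduce__ ships (class name, field names, values) and _restore
# rebuilds an equivalent class wherever the pickle is loaded. A hedged
# round-trip sketch:
#
#     >>> _restore("Point", ("x", "y"), (1, 2))
#     Point(x=1, y=2)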
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will be put into a closure
global _old_namedtuple_kwdefaults # or it will be put into a closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple
# those created in other module can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
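# Editorial note on the framing used by the helpers above: every framed value
# is a 4-byte big-endian signed length followed by that many payload bytes,
# and the negative SpecialLengths markers travel through the same length
# field to signal events such as end-of-stream. A small, self-contained
# sketch of the byte layout:
#
#     import io, struct
#     buf = io.BytesIO()
#     write_with_length(b"spark", buf)
#     assert buf.getvalue() == struct.pack("!i", 5) + b"spark"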
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| apache-2.0 |
devanshdalal/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 86 | 1234 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
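# Editorial note: the function above evaluates the piecewise modified Huber
# loss
#     L(y, f) = (1 - y*f)**2   if y*f >= -1   (and 0 once y*f >= 1)
#     L(y, f) = -4*y*f         otherwise
# so, for example, y*f = 0 gives a loss of 1 and y*f = -2 gives a loss of 8.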
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
| bsd-3-clause |
htygithub/bokeh | bokeh/charts/builders/line_builder.py | 2 | 9366 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Line class, which lets you build your Line charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from ..builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph, PointGlyph
from ..attributes import DashAttr, ColorAttr, MarkerAttr
from ..data_source import NumericalColumnsAssigner
from ...models.sources import ColumnDataSource
from ...properties import Bool, String, List
from ..operations import Stack, Dodge
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builders.line_builder.LineBuilder>` to
render the glyphs.
The line chart is typically used with column-oriented data, where each column
contains comparable measurements and the column names are treated as a categorical
variable for differentiating the measurement values. One of the columns can be used as
an index for either the x or y axis.
.. note::
Only the x or y axis can display multiple variables, while the other is used
as an index.
Args:
data (list(list), numpy.ndarray, pandas.DataFrame, list(pd.Series)): a 2d data
source with columns of data for each line.
x (str or list(str), optional): specifies variable(s) to use for x axis
y (str or list(str), optional): specifies variable(s) to use for y axis
In addition to the parameters specific to this chart,
:ref:`userguide_charts_defaults` are also accepted as keyword parameters.
.. note::
This chart type differs in its input types compared to other charts,
due to the way that line charts typically plot labeled series. For
example, a column for AAPL stock prices over time. Another way this could be
plotted is to have a DataFrame with a column of `stock_label` and columns of
`price`, which is the stacked format. Both should be supported, but the former
is the expected one. Internally, the latter format is being derived.
Returns:
:class:`Chart`: includes glyph renderers that generate the lines
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(LineBuilder, data, **kws)
class LineBuilder(XYBuilder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
series_names = List(String, help="""Names that represent the items being plotted.""")
stack = Bool(default=False)
default_attributes = {'color': ColorAttr(),
'dash': DashAttr(),
'marker': MarkerAttr()}
dimensions = ['y', 'x']
column_selector = NumericalColumnsAssigner
glyph = LineGlyph
@property
def measures(self):
if isinstance(self.y.selection, list):
return self.y.selection
elif isinstance(self.x.selection, list):
return self.x.selection
else:
return None
@property
def measure_input(self):
return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)
@property
def stack_flags(self):
# Check if we stack measurements and by which attributes
# This happens if we used the same series labels for dimensions as attributes
return {k: self.attr_measurement(k) for k in list(
self.attributes.keys())}
def get_id_cols(self, stack_flags):
# collect the other columns used as identifiers, that aren't a measurement name
id_cols = [self.attributes[attr].columns
for attr, stack in iteritems(stack_flags) if not stack and
self.attributes[attr].columns != self.measures and
self.attributes[attr].columns is not None]
return list(chain.from_iterable(id_cols))
def setup(self):
"""Handle input options that require transforming data and/or user selections."""
# handle special case of inputs as measures
if self.measure_input:
stack_flags = self.stack_flags
id_cols = self.get_id_cols(stack_flags)
# if we have measures input, we need to stack by something, set default
if all(attr is False for attr in list(stack_flags.values())):
stack_flags['color'] = True
# stack the measurement dimension while keeping id columns
self._stack_measures(ids=id_cols)
# set the attributes to key off of the name of the stacked measurement
source = ColumnDataSource(self._data.df)
for attr_name, stack_flag in iteritems(stack_flags):
if stack_flags[attr_name]:
default_attr = self.attributes[attr_name]
default_attr.setup(columns='series', data=source)
# Handle when to use special column names
if self.x.selection is None and self.y.selection is not None:
self.x.selection = 'index'
elif self.x.selection is not None and self.y.selection is None:
self.y.selection = 'index'
def attr_measurement(self, attr_name):
"""Detect if the attribute has been given measurement columns."""
cols = self.attributes[attr_name].columns
return (cols is not None and (cols == self.y.selection or
cols == self.x.selection))
def set_series(self, col_name):
series = self._data.df[col_name].drop_duplicates().tolist()
series = [str(item) for item in series]
self.series_names = series
def _stack_measures(self, ids, var_name='series'):
"""Stack data and keep the ids columns.
Args:
ids (list(str)): the column names that describe the measures
"""
if isinstance(self.y.selection, list):
dim = 'y'
if self.x.selection is not None:
ids.append(self.x.selection)
else:
dim = 'x'
if self.y.selection is not None:
ids.append(self.y.selection)
if len(ids) == 0:
ids = None
dim_prop = getattr(self, dim)
# transform our data by stacking the measurements into one column
self._data.stack_measures(measures=dim_prop.selection, ids=ids,
var_name=var_name)
# update our dimension with the updated data
dim_prop.set_data(self._data)
self.set_series('series')
def get_builder_attr(self):
attrs = self.properties()
return {attr: getattr(self, attr) for attr in attrs
if attr in self.glyph.properties()}
def yield_renderers(self):
build_attr = self.get_builder_attr()
# get the list of builder attributes and only pass them on if glyph supports
attrs = list(self.attributes.keys())
attrs = [attr for attr in attrs if attr in self.glyph.properties()]
for group in self._data.groupby(**self.attributes):
group_kwargs = self.get_group_kwargs(group, attrs)
group_kwargs.update(build_attr)
glyph = self.glyph(x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
**group_kwargs)
# dash=group['dash']
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
yield renderer
Stack().apply(self.comp_glyphs)
Dodge().apply(self.comp_glyphs)
class PointSeriesBuilder(LineBuilder):
glyph = PointGlyph
| bsd-3-clause |
alejob/mdanalysis | package/MDAnalysis/analysis/contacts.py | 1 | 45584 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Native contacts analysis --- :mod:`MDAnalysis.analysis.contacts`
================================================================
This module contains classes to analyze native contacts *Q* over a
trajectory. Native contacts of a conformation are contacts that exist
in a reference structure and in the conformation. Contacts in the
reference structure are always defined as being closer than a distance
`radius`. The fraction of native contacts for a conformation can be
calculated in different ways. This module supports 3 different metrics
listed below, as well as custom metrics.
1. *Hard Cut*: To count as a contact the atoms *i* and *j* have to be at least
as close as in the reference structure.
2. *Soft Cut*: The atom pair *i* and *j* is assigned based on a soft potential
that is 1 if the distance is 0, 1/2 if the distance is the same as in
the reference and 0 for large distances. For the exact definition of the
potential and parameters have a look at function :func:`soft_cut_q`.
3. *Radius Cut*: To count as a contact the atoms *i* and *j* cannot be further
apart than some distance `radius`.
The "fraction of native contacts" *Q(t)* is a number between 0 and 1 and
calculated as the total number of native contacts for a given time frame
divided by the total number of contacts in the reference structure.
Examples for contact analysis
-----------------------------
One-dimensional contact analysis
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As an example we analyze the opening ("unzipping") of salt bridges
when the AdK enzyme opens up; this is one of the example trajectories
in MDAnalysis. ::
import MDAnalysis as mda
from MDAnalysis.analysis import contacts
from MDAnalysis.tests.datafiles import PSF,DCD
import numpy as np
import matplotlib.pyplot as plt
# example trajectory (transition of AdK from closed to open)
u = mda.Universe(PSF,DCD)
# crude definition of salt bridges as contacts between NH/NZ in ARG/LYS and
# OE*/OD* in ASP/GLU. You might want to think a little bit harder about the
# problem before using this for real work.
sel_basic = "(resname ARG LYS) and (name NH* NZ)"
sel_acidic = "(resname ASP GLU) and (name OE* OD*)"
# reference groups (first frame of the trajectory, but you could also use a
# separate PDB, eg crystal structure)
acidic = u.select_atoms(sel_acidic)
basic = u.select_atoms(sel_basic)
# set up analysis of native contacts ("salt bridges"); salt bridges have a
# distance <6 A
ca1 = contacts.Contacts(u, selection=(sel_acidic, sel_basic),
refgroup=(acidic, basic), radius=6.0)
# iterate through trajectory and perform analysis of "native contacts" Q
ca1.run()
# print the average number of contacts
average_contacts = np.mean(ca1.timeseries[:, 1])
print('average contacts = {}'.format(average_contacts))
# plot time series q(t)
f, ax = plt.subplots()
ax.plot(ca1.timeseries[:, 0], ca1.timeseries[:, 1])
ax.set(xlabel='frame', ylabel='fraction of native contacts',
title='Native Contacts, average = {:.2f}'.format(average_contacts))
f.show()
The first graph shows that when AdK opens, about 20% of the salt
bridges that existed in the closed state disappear when the enzyme
opens. They open in a step-wise fashion (made more clear by the movie
`AdK_zipper_cartoon.avi`_).
.. _`AdK_zipper_cartoon.avi`:
http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2803350/bin/NIHMS150766-supplement-03.avi
.. rubric:: Notes
Suggested cutoff distances for different simulations
* For all-atom simulations, cutoff = 4.5 Å
* For coarse-grained simulations, cutoff = 6.0 Å
Two-dimensional contact analysis (q1-q2)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Analyze a single DIMS transition of AdK between its closed and open
conformation and plot the trajectory projected on q1-q2 [Franklin2007]_ ::
import MDAnalysis as mda
from MDAnalysis.analysis import contacts
from MDAnalysisTests.datafiles import PSF, DCD
import matplotlib.pyplot as plt
u = mda.Universe(PSF, DCD)
q1q2 = contacts.q1q2(u, 'name CA', radius=8)
q1q2.run()
f, ax = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax[0].plot(q1q2.timeseries[:, 0], q1q2.timeseries[:, 1], label='q1')
ax[0].plot(q1q2.timeseries[:, 0], q1q2.timeseries[:, 2], label='q2')
ax[0].legend(loc='best')
ax[1].plot(q1q2.timeseries[:, 1], q1q2.timeseries[:, 2], '.-')
f.show()
Compare the resulting pathway to the `MinActionPath result for AdK`_
[Franklin2007]_.
.. _MinActionPath result for AdK:
http://lorentz.dynstr.pasteur.fr/joel/adenylate.php
Writing your own contact analysis
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :class:`Contacts` class has been designed to be extensible for your own
analysis. As an example we will analyze when the acidic and basic groups of AdK
are in contact with each other; this means that at least one of the contacts
formed in the reference is closer than 2.5 Å.
For this we define a new function to determine if any contact is closer than
2.5 Å; this function must implement the API prescribed by :class:`Contacts`::
def is_any_closer(r, r0, dist=2.5):
return np.any(r < dist)
The first two parameters `r` and `r0` are provided by :class:`Contacts` when it
calls :func:`is_any_closer` while the others can be passed as keyword args
using the `kwargs` parameter in :class:`Contacts`.
Next we are creating an instance of the :class:`Contacts` class and use the
:func:`is_any_closer` function as an argument to `method` and run the analysis::
# crude definition of salt bridges as contacts between NH/NZ in ARG/LYS and
# OE*/OD* in ASP/GLU. You might want to think a little bit harder about the
# problem before using this for real work.
sel_basic = "(resname ARG LYS) and (name NH* NZ)"
sel_acidic = "(resname ASP GLU) and (name OE* OD*)"
# reference groups (first frame of the trajectory, but you could also use a
# separate PDB, eg crystal structure)
acidic = u.select_atoms(sel_acidic)
basic = u.select_atoms(sel_basic)
nc = contacts.Contacts(u, selection=(sel_acidic, sel_basic),
method=is_any_closer,
refgroup=(acidic, basic), kwargs={'dist': 2.5})
nc.run()
bound = nc.timeseries[:, 1]
frames = nc.timeseries[:, 0]
f, ax = plt.subplots()
ax.plot(frames, bound, '.')
ax.set(xlabel='frame', ylabel='is Bound',
ylim=(-0.1, 1.1))
f.show()
Functions
---------
.. autofunction:: hard_cut_q
.. autofunction:: soft_cut_q
.. autofunction:: radius_cut_q
.. autofunction:: contact_matrix
.. autofunction:: q1q2
Classes
-------
.. autoclass:: Contacts
:members:
Deprecated
----------
The following classes are deprecated and are scheduled for removal in release 0.17.0.
.. autoclass:: ContactAnalysis1
:members:
.. autoclass:: ContactAnalysis
:members:
"""
from __future__ import division
from six.moves import zip
import os
import errno
import warnings
import bz2
import numpy as np
from numpy.lib.utils import deprecate
import logging
import MDAnalysis
import MDAnalysis.lib.distances
from MDAnalysis.lib.util import openany
from MDAnalysis.analysis.distances import distance_array
from MDAnalysis.core.groups import AtomGroup
from .base import AnalysisBase
logger = logging.getLogger("MDAnalysis.analysis.contacts")
def soft_cut_q(r, r0, beta=5.0, lambda_constant=1.8):
r"""Calculate fraction of native contacts *Q* for a soft cut off
The native contact function is defined as [Best2013]_
.. math::
Q(r, r_0) = \frac{1}{1 + e^{\beta (r - \lambda r_0)}}
Reasonable values for different simulation types are
- *All Atom*: `lambda_constant = 1.8` (unitless)
- *Coarse Grained*: `lambda_constant = 1.5` (unitless)
Parameters
----------
r: array
Contact distances at time t
r0: array
Contact distances at time t=0, reference distances
beta: float (default 5.0 Angstrom)
Softness of the switching function
lambda_constant: float (default 1.8, unitless)
Reference distance tolerance
Returns
-------
Q : float
fraction of native contacts
References
----------
.. [Best2013] RB Best, G Hummer, and WA Eaton, "Native contacts determine protein
folding mechanisms in atomistic simulations" _PNAS_ **110** (2013),
17874–17879. doi: `10.1073/pnas.1311599110
<http://doi.org/10.1073/pnas.1311599110>`_.
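Examples
--------
A minimal sketch; the arrays below are arbitrary distances chosen only to
illustrate the call signature, not data from a real trajectory::
    import numpy as np
    r0 = np.array([3.0, 4.0, 5.0])   # reference contact distances
    r = np.array([3.2, 6.5, 5.1])    # distances at time t
    q = soft_cut_q(r, r0)            # fraction of native contacts, 0 <= q <= 1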
"""
r = np.asarray(r)
r0 = np.asarray(r0)
result = 1/(1 + np.exp(beta*(r - lambda_constant * r0)))
return result.sum() / len(r0)
def hard_cut_q(r, cutoff):
"""Calculate fraction of native contacts *Q* for a hard cut off.
The cutoff can either be a float or a :class:`~numpy.ndarray` of the same
shape as `r`.
Parameters
----------
r : ndarray
distance matrix
cutoff : ndarray | float
cut off value to count distances. Can either be a float of a ndarray of
the same size as distances
Returns
-------
Q : float
fraction of contacts
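Examples
--------
A minimal sketch with made-up distances; the cutoff may be a single float or
an array of per-pair cutoffs with the same shape as ``r``::
    import numpy as np
    r = np.array([3.1, 4.8, 7.0])
    hard_cut_q(r, 4.5)                        # one global cutoff
    hard_cut_q(r, np.array([3.5, 5.0, 6.0]))  # per-pair cutoffs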
"""
r = np.asarray(r)
cutoff = np.asarray(cutoff)
y = r <= cutoff
return y.sum() / r.size
def radius_cut_q(r, r0, radius):
"""calculate native contacts *Q* based on the single distance radius.
Parameters
----------
r : ndarray
distance array between atoms
r0 : ndarray
unused to fullfill :class:`Contacts` API
radius : float
Distance between atoms at which a contact is formed
Returns
-------
Q : float
fraction of contacts
References
----------
.. [Franklin2007] Franklin, J., Koehl, P., Doniach, S., & Delarue,
M. (2007). MinActionPath: Maximum likelihood trajectory for large-scale
structural transitions in a coarse-grained locally harmonic energy
landscape. Nucleic Acids Research, 35(SUPPL.2), 477–482.
doi: `10.1093/nar/gkm342 <http://doi.org/10.1093/nar/gkm342>`_
"""
return hard_cut_q(r, radius)
def contact_matrix(d, radius, out=None):
"""calculate contacts from distance matrix
Parameters
----------
d : array-like
distance matrix
radius : float
distance below which a contact is formed.
out: array (optional)
If `out` is supplied as a pre-allocated array of the correct
shape then it is filled instead of allocating a new one in
order to increase performance.
Returns
-------
contacts : ndarray
boolean array of formed contacts
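Examples
--------
A minimal sketch; the distance matrix is random and only illustrates how a
pre-allocated buffer can be reused, e.g. once per trajectory frame::
    import numpy as np
    d = np.random.uniform(2.0, 10.0, size=(5, 5))   # some distance matrix
    buf = np.empty(d.shape, dtype=bool)
    contacts = contact_matrix(d, 4.5, out=buf)      # fills and returns buf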
"""
if out is not None:
out[:] = d <= radius
else:
out = d <= radius
return out
class Contacts(AnalysisBase):
"""Calculate contacts based observables.
The standard methods used in this class calculate the fraction of native
contacts *Q* from a trajectory.
.. rubric:: Contact API
By defining your own method it is possible to calculate other observables
that only depend on the distances and a possible reference distance. The
**Contact API** prescribes that this method must be a function with call
signature ``func(r, r0, **kwargs)`` and must be provided in the keyword
argument `method`.
Attributes
----------
timeseries : list
list containing *Q* for all refgroup pairs and analyzed frames
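Examples
--------
A minimal sketch of a custom metric obeying the Contact API; the function
name and the ``scale`` keyword are made up, and ``u``, the selection strings
and the reference groups are assumed to be defined as in the module-level
example::
    def fraction_within(r, r0, scale=1.2):
        # fraction of reference contacts still within scale * r0
        return np.mean(r < scale * r0)
    ca = Contacts(u, selection=(sel_acidic, sel_basic),
                  refgroup=(acidic, basic),
                  method=fraction_within, kwargs={'scale': 1.2})
    ca.run()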
"""
def __init__(self, u, selection, refgroup, method="hard_cut", radius=4.5,
kwargs=None, **basekwargs):
"""
Parameters
----------
u : Universe
trajectory
selection : tuple(string, string)
two contacting groups that change over time
refgroup : tuple(AtomGroup, AtomGroup)
two contacting atomgroups in their reference conformation. This
can also be a list of tuples containing different atom groups
radius : float, optional (4.5 Angstroms)
radius within which contacts exist in refgroup
method : string | callable (optional)
Can either be one of ``['hard_cut' , 'soft_cut']`` or a callable
with call signature ``func(r, r0, **kwargs)`` (the "Contacts API").
kwargs : dict, optional
dictionary of additional kwargs passed to `method`. Check
respective functions for reasonable values.
start : int, optional
First frame of trajectory to analyse, Default: None becomes 0.
stop : int, optional
Frame index to stop analysis. Default: None becomes
n_frames. Iteration stops *before* this frame number, so with the
default the whole trajectory is analysed.
step : int, optional
Step between frames to analyse, Default: None becomes 1.
"""
self.u = u
super(Contacts, self).__init__(self.u.trajectory, **basekwargs)
if method == 'hard_cut':
self.fraction_contacts = hard_cut_q
elif method == 'soft_cut':
self.fraction_contacts = soft_cut_q
else:
if not callable(method):
raise ValueError("method has to be callable")
self.fraction_contacts = method
self.selection = selection
self.grA = u.select_atoms(selection[0])
self.grB = u.select_atoms(selection[1])
# contacts formed in reference
self.r0 = []
self.initial_contacts = []
if isinstance(refgroup[0], AtomGroup):
refA, refB = refgroup
self.r0.append(distance_array(refA.positions, refB.positions))
self.initial_contacts.append(contact_matrix(self.r0[-1], radius))
else:
for refA, refB in refgroup:
self.r0.append(distance_array(refA.positions, refB.positions))
self.initial_contacts.append(contact_matrix(self.r0[-1],
radius))
self.fraction_kwargs = kwargs if kwargs is not None else {}
self.timeseries = []
def _single_frame(self):
# compute distance array for a frame
d = distance_array(self.grA.positions, self.grB.positions)
y = np.empty(len(self.r0) + 1)
y[0] = self._ts.frame
for i, (initial_contacts, r0) in enumerate(zip(self.initial_contacts,
self.r0)):
# select only the contacts that were formed in the reference state
r = d[initial_contacts]
r0 = r0[initial_contacts]
y[i + 1] = self.fraction_contacts(r, r0, **self.fraction_kwargs)
if len(y) == 1:
y = y[0]
self.timeseries.append(y)
def _conclude(self):
self.timeseries = np.array(self.timeseries, dtype=float)
def save(self, outfile):
"""save contacts timeseries
Parameters
----------
outfile : str
file to save contacts
"""
with open(outfile, "w") as f:
f.write("# q1 analysis\n")
np.savetxt(f, self.timeseries)
def _new_selections(u_orig, selections, frame):
"""create stand alone AGs from selections at frame"""
u = MDAnalysis.Universe(u_orig.filename, u_orig.trajectory.filename)
u.trajectory[frame]
return [u.select_atoms(s) for s in selections]
def q1q2(u, selection='all', radius=4.5,
start=None, stop=None, step=None):
"""Perform a q1-q2 analysis.
Compares native contacts between the starting structure and final structure
of a trajectory [Franklin2007]_.
Parameters
----------
u : Universe
Universe with a trajectory
selection : string, optional
atoms to do analysis on
radius : float, optional
distance at which contact is formed
start : int, optional
First frame of trajectory to analyse, Default: 0
stop : int, optional
Last frame of trajectory to analyse, Default: -1
step : int, optional
Step between frames to analyse, Default: 1
Returns
-------
contacts : :class:`Contacts`
Contact Analysis that is set up for a q1-q2 analysis
"""
selection = (selection, selection)
first_frame_refs = _new_selections(u, selection, 0)
last_frame_refs = _new_selections(u, selection, -1)
return Contacts(u, selection,
(first_frame_refs, last_frame_refs),
radius=radius, method=radius_cut_q,
start=start, stop=stop, step=step,
kwargs={'radius': radius})
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
################################################################################
# What comes now are old deprecated contact Analysis classes
# ContactAnalysis needs to be cleaned up and possibly renamed but
# until then it remains because we don't have the functionality
# elsewhere.
@deprecate(new_name="Contacts", message="This class will be removed in 0.17")
class ContactAnalysis(object):
"""Perform a native contact analysis ("q1-q2").
The analysis of the trajectory is performed with the
:meth:`ContactAnalysis.run` method. The result is stored in
:attr:`ContactAnalysis.timeseries`. It is a numpy array which
contains the frame number at index 0, q1 and q2 at index 1 and 2,
and the total number of contacts in 3 and 4. ::
frame q1 q2 n1 n2
The total number of contacts in the reference states 1 and 2 are
stored in :attr:`ContactAnalysis.nref` (index 0 and 1).
The :meth:`ContactAnalysis.run` method calculates the percentage of native
contacts *q1* and *q2* along a trajectory. "Contacts" are defined as the
number of Ca atoms (or per-residue *centroids* of a user defined
*selection*) within *radius* of a primary Ca. *q1* is the fraction of
contacts relative to the reference state 1 (typically the starting
conformation of the trajectory) and *q2* is the fraction of contacts
relative to the conformation 2.
The timeseries is written to a bzip2-compressed file in `targetdir`
named "basename(trajectory)infix_q1q2.dat.bz2" and is also
accessible as the attribute
:attr:`ContactAnalysis.timeseries`.
.. deprecated:: 0.15.0
"""
def __init__(self, topology, trajectory, ref1=None, ref2=None, radius=8.0,
targetdir=os.path.curdir, infix="", force=False,
selection="name CA", centroids=False):
"""
Parameters
----------
topology : filename as str
topology file
trajectory : filename as str
trajectory
ref1 : filename or ``None``, optional
structure of the reference conformation 1 (pdb); if ``None`` the
*first* frame of the trajectory is chosen
ref2 : filename or ``None``, optional
structure of the reference conformation 2 (pdb); if ``None`` the
*last* frame of the trajectory is chosen
radius : float, optional, default 8 A
contacts are deemed any Ca within radius
targetdir : path, optional, default ``.``
output files are saved in this directory
infix : string, optional
additional tag string that is inserted into the output filename of
the data file
selection : string, optional, default ``"name CA"``
MDAnalysis selection string that selects the particles of
interest; the default is to only select the C-alpha atoms
in `ref1` and `ref2`
.. Note:: If `selection` produces more than one atom per
residue then you will get multiple contacts per
residue unless you also set `centroids` = ``True``
centroids : bool
If set to ``True``, use the centroids for the selected atoms on a
per-residue basis to compute contacts. This allows, for instance
defining the sidechains as `selection` and then computing distances
between sidechain centroids.
"""
self.topology = topology
self.trajectory = trajectory
self.radius = radius
self.targetdir = targetdir
self.force = force
self.selection = selection
self.centroids = centroids
trajectorybase = os.path.splitext(os.path.basename(trajectory))[0]
output = trajectorybase + infix + '_q1q2.dat'
self.output = os.path.join(self.targetdir, output)
self.output_bz2 = self.output + '.bz2'
self.timeseries = None # final result
# short circuit if output file already exists: skip everything
if self.output_exists():
self._skip = True
# do not bother reading any data or initializing arrays... !!
return
# don't bother if trajectory is empty (can lead to segfaults so better
# catch it)
stats = os.stat(trajectory)
if stats.st_size == 0:
warnings.warn('trajectory = {trajectory!s} is empty, '
'skipping...'.format(**vars()))
self._skip = True
return
# under normal circumstances we do not skip
self._skip = False
# expensive initialization starts with building Universes :-)
self.u = MDAnalysis.Universe(topology, trajectory)
if ref1 is None:
ref1 = os.path.join(self.targetdir, trajectorybase + '_first.pdb')
self.u.trajectory[0] # extract first frame
self.u.atoms.write(ref1)
self.ref1 = ref1
if ref2 is None:
ref2 = os.path.join(self.targetdir, trajectorybase + '_last.pdb')
self.u.trajectory[-1] # extract last frame
self.u.atoms.write(ref2)
self.u.trajectory[0] # rewind, just in case...
self.ref2 = ref2
r1 = MDAnalysis.Universe(topology, self.ref1)
r2 = MDAnalysis.Universe(topology, self.ref2)
self.ca = self.u.select_atoms(self.selection)
ca1 = r1.select_atoms(self.selection)
ca2 = r2.select_atoms(self.selection)
# NOTE: self_distance_array() produces a 1D array; this works here
# but is not the same as the 2D output from distance_array()!
# See the docs for self_distance_array().
dref = [self.get_distance_array(ca1), self.get_distance_array(ca2)]
self.qref = [self.qarray(dref[0]), self.qarray(dref[1])]
self.nref = [self.qref[0].sum(), self.qref[1].sum()]
self.d = np.zeros_like(dref[0])
self.q = self.qarray(self.d)
self._qtmp = np.zeros_like(self.q) # pre-allocated array
def get_distance_array(self, g, **kwargs):
"""Calculate the self_distance_array for atoms in group *g*.
Parameters
----------
g : AtomGroup
group of atoms to calculate distance array for
results : array, optional
passed on to :func:`MDAnalysis.lib.distances.self_distance_array`
as a preallocated array
centroids : bool, optional, default ``None``
``True``: calculate per-residue centroids from the selected
atoms; ``False``: consider each atom separately; ``None``: use
the class default for *centroids* [``None``]
"""
centroids = kwargs.pop("centroids", None)
centroids = self.centroids if centroids is None else centroids
if not centroids:
coordinates = g.positions
else:
# centroids per residue (but only including the selected atoms)
coordinates = np.array([residue.centroid()
for residue in g.split("residue")])
return MDAnalysis.lib.distances.self_distance_array(coordinates,
**kwargs)
def output_exists(self, force=False):
"""Return True if default output file already exists.
Disable with force=True (will always return False)
"""
return (os.path.isfile(self.output) or
os.path.isfile(self.output_bz2)) and not (self.force or force)
def run(self, store=True, force=False):
"""Analyze trajectory and produce timeseries.
Stores results in :attr:`ContactAnalysis.timeseries` (if
store=True) and writes them to a bzip2-compressed data file.
"""
if self._skip or self.output_exists(force=force):
warnings.warn("File {output!r} or {output_bz2!r} already exists, "
"loading {trajectory!r}.".format(**vars(self)))
try:
self.load(self.output)
except IOError:
self.load(self.output_bz2)
return None
outbz2 = bz2.BZ2File(self.output_bz2, mode='w', buffering=8192)
try:
outbz2.write("# q1-q2 analysis\n"
"# nref1 = {0:d}\n"
"# nref2 = {1:d}\n".format(self.nref[0],
self.nref[1]))
outbz2.write("# frame q1 q2 n1 n2\n")
records = []
for ts in self.u.trajectory:
frame = ts.frame
# use pre-allocated distance array to save a little bit of time
self.get_distance_array(self.ca, result=self.d)
self.qarray(self.d, out=self.q)
n1, q1 = self.qN(self.q, 0, out=self._qtmp)
n2, q2 = self.qN(self.q, 1, out=self._qtmp)
if store:
records.append((frame, q1, q2, n1, n2))
outbz2.write("{frame:4d} {q1:8.6f} {q2:8.6f} {n1:5d} {n2:5d}\n".format(**vars()))
finally:
outbz2.close()
if store:
self.timeseries = np.array(records).T
return self.output_bz2
def qarray(self, d, out=None):
"""Return array with ``True`` for contacts.
Note
----
This method is typically only used internally.
Arguments
---------
d : array
2D array of distances. The method uses the value of
:attr:`radius` to determine if a ``distance < radius``
is considered a contact.
out : array, optional
If `out` is supplied as a pre-allocated array of the correct
shape then it is filled instead of allocating a new one in
order to increase performance.
Returns
-------
array
contact matrix
"""
if out is None:
out = (d <= self.radius)
else:
out[:] = (d <= self.radius)
return out
def qN(self, q, n, out=None):
"""Calculate native contacts relative to reference state.
Note
----
This method is typically only used internally.
Arguments
---------
q : array
contact matrix (see :meth:`Contacts.qarray`)
out : array, optional
If `out` is supplied as a pre-allocated array of the correct
shape then it will contain the contact matrix relative
to the reference state, i.e. only those contacts that
are also seen in the reference state.
Returns
-------
contacts : integer
total number of contacts
fraction : float
fraction of contacts relative to the reference state
"""
if out is None:
out = np.logical_and(q, self.qref[n])
else:
np.logical_and(q, self.qref[n], out)
contacts = out.sum()
return contacts, float(contacts) / self.nref[n]
def load(self, filename):
"""Load the data file."""
records = []
with openany(filename) as data:
for line in data:
if line.startswith('#'):
continue
records.append(list(map(float, line.split())))
self.timeseries = np.array(records).T
def plot(self, **kwargs):
"""Plot q1-q2."""
from pylab import plot, xlabel, ylabel
kwargs.setdefault('color', 'black')
if self.timeseries is None:
raise ValueError("No timeseries data; do "
"'ContactAnalysis.run(store=True)' first.")
t = self.timeseries
plot(t[1], t[2], **kwargs)
xlabel(r"$q_1$")
ylabel(r"$q_2$")
@deprecate(new_name="Contacts", message="This class will be removed in 0.17")
class ContactAnalysis1(object):
"""Perform a very flexible native contact analysis with respect to a single
reference.
This analysis class allows one to calculate the fraction of native contacts
*q* between two arbitrary groups of atoms with respect to an arbitrary
reference structure. For instance, as a reference one could take a crystal
structure of a complex, and as the two groups of atoms one selects two
molecules A and B in the complex. Then the question answered by *q*
is: what percentage of the contacts between A and B persists during the
simulation.
First prepare :class:`~MDAnalysis.core.groups.AtomGroup` selections for
the reference atoms; this example uses some arbitrary selections::
ref = Universe('crystal.pdb')
refA = ref.select_atoms('name CA and segid A and resid 6:100')
refB = ref.select_atoms('name CA and segid B and resid 1:40')
Load the trajectory::
u = Universe(topology, trajectory)
We then need two selection strings *selA* and *selB* that, when applied as
``u.select_atoms(selA)`` produce a list of atoms that is equivalent to the
reference (i.e. ``u.select_atoms(selA)`` must select the same atoms as
``refA`` in this example)::
selA = 'name CA and resid 1:95' # corresponds to refA
selB = 'name CA and resid 150:189' # corresponds to refB
.. Note::
It is the user's responsibility to provide a reference group
(or groups) that describe equivalent atoms to the ones selected
by *selection*.
Now we are ready to set up the analysis::
CA1 = ContactAnalysis1(u, selection=(selA,selB), refgroup=(refA,refB),
radius=8.0, outfile="q.dat")
If the groups do not match in length then a :exc:`ValueError` is raised.
The analysis across the whole trajectory is performed with ::
CA1.run()
Results are saved to *outfile* (``framenumber q N`` per line) and
can also be plotted with ::
CA1.plot() # plots the time series q(t)
CA1.plot_qavg() # plots the matrix of average contacts <q>
**Description of computed values** in the output file:
*N*
number of native contacts
*q*
fraction of native contacts relative to the reference
.. deprecated:: 0.15.0
"""
def __init__(self, *args, **kwargs):
"""Calculate native contacts within a group or between two groups.
:Arguments:
*topology*
psf or pdb file
*trajectory*
dcd or xtc/trr file
*universe*
instead of a topology/trajectory combination, one can also supply
a :class:`MDAnalysis.Universe`
:Keywords:
*selection*
selection string that determines which distances are calculated; if
this is a tuple or list with two entries then distances are
calculated between these two different groups ["name CA or name
B*"]
*refgroup*
reference group, either a single
:class:`~MDAnalysis.core.groups.AtomGroup` (if there is only a
single *selection*) or a list of two such groups. The reference
contacts are directly computed from *refgroup* and hence the atoms
in the reference group(s) must be equivalent to the ones produced
by the *selection* on the input trajectory.
*radius*
contacts are deemed any atoms within radius [8.0 A]
*outfile*
name of the output file; with the gz or bz2 suffix, a compressed
file is written. The average <q> is written to a second, gzipped
file that has the same name with 'array' included. E.g. for the
default name "q1.dat.gz" the <q> file will be "q1.array.gz". The
format is the matrix in column-row format, i.e. selection 1
residues are the columns and selection 2 residues are rows. The
file can be read with :func:`np.loadtxt`. ["q1.dat.gz"]
The function calculates the percentage of native contacts q1
along a trajectory. "Contacts" are defined as the number of atoms
within *radius* of a given other atom. *q1* is the fraction of contacts
relative to the reference state 1 (typically the starting conformation
of the trajectory).
The timeseries is written to a file *outfile* and is also accessible as
the attribute :attr:`ContactAnalysis1.timeseries`.
"""
# XX or should I use as input
# sel = (group1, group2), ref = (refgroup1, refgroup2)
# and get the universe from sel?
# Currently it's an odd hybrid.
#
# Enhancements:
# - select contact pairs to write out as a timecourse
# - make this selection based on qavg
from os.path import splitext
warnings.warn("ContactAnalysis1 is deprecated and will be removed "
"in 1.0. Use Contacts instead.",
category=DeprecationWarning)
self.selection_strings = self._return_tuple2(kwargs.pop(
'selection', "name CA or name B*"), "selection")
self.references = self._return_tuple2(kwargs.pop('refgroup', None),
"refgroup")
self.radius = kwargs.pop('radius', 8.0)
self.targetdir = kwargs.pop('targetdir', os.path.curdir)
self.output = kwargs.pop('outfile', "q1.dat.gz")
self.outarray = splitext(splitext(self.output)[0])[0] + ".array.gz"
self.force = kwargs.pop('force', False)
self.timeseries = None # final result
self.filenames = args
self.universe = MDAnalysis.as_Universe(*args, **kwargs)
self.selections = [self.universe.select_atoms(s)
for s in self.selection_strings]
# sanity checks
for x in self.references:
if x is None:
raise ValueError("a reference AtomGroup must be supplied")
for ref, sel, s in zip(self.references,
self.selections,
self.selection_strings):
if ref.atoms.n_atoms != sel.atoms.n_atoms:
raise ValueError("selection=%r: Number of atoms differ "
"between reference (%d) and trajectory (%d)" %
(s, ref.atoms.n_atoms, sel.atoms.n_atoms))
# compute reference contacts
dref = MDAnalysis.lib.distances.distance_array(
self.references[0].positions, self.references[1].positions)
self.qref = self.qarray(dref)
self.nref = self.qref.sum()
# setup arrays for the trajectory
self.d = np.zeros_like(dref)
self.q = self.qarray(self.d)
self._qtmp = np.zeros_like(self.q) # pre-allocated array
self.qavg = np.zeros(shape=self.q.shape, dtype=np.float64)
def _return_tuple2(self, x, name):
if not isinstance(x, (tuple, list, np.ndarray)):
t = (x,)
else:
t = x
if len(t) == 2:
return t
elif len(t) == 1:
return (x, x)
else:
raise ValueError("%(name)s must be a single object or a "
"tuple/list with two objects and not %(x)r" % vars())
def output_exists(self, force=False):
"""Return True if default output file already exists.
Disable with force=True (will always return False)
"""
return os.path.isfile(self.output) and not (self.force or force)
def run(self, store=True, force=False, start=0, stop=None, step=1,
**kwargs):
"""Analyze trajectory and produce timeseries.
Stores results in :attr:`ContactAnalysis1.timeseries` (if store=True)
and writes them to a data file. The average q is written to a second
data file.
*start*
The value of the first frame index in the trajectory to be used
(default: index 0)
*stop*
The value of the last frame index in the trajectory to be used
(default: None -- use all frames)
*step*
The number of frames to skip during trajectory iteration (default:
use every frame)
"""
if 'start_frame' in kwargs:
warnings.warn("start_frame argument has been deprecated, use "
"start instead -- removal targeted for version "
"0.15.0", DeprecationWarning)
start = kwargs.pop('start_frame')
if 'end_frame' in kwargs:
warnings.warn("end_frame argument has been deprecated, use "
"stop instead -- removal targeted for version "
"0.15.0", DeprecationWarning)
stop = kwargs.pop('end_frame')
if 'step_value' in kwargs:
warnings.warn("step_value argument has been deprecated, use "
"step instead -- removal targeted for version "
"0.15.0", DeprecationWarning)
step = kwargs.pop('step_value')
if self.output_exists(force=force):
warnings.warn("File %r already exists, loading it INSTEAD of "
"trajectory %r. Use force=True to overwrite "
"the output file. " %
(self.output, self.universe.trajectory.filename))
self.load(self.output)
return None
with openany(self.output, 'w') as out:
out.write("# q1 analysis\n# nref = {0:d}\n".format((self.nref)))
out.write("# frame q1 n1\n")
records = []
self.qavg *= 0 # average contact existence
A, B = self.selections
for ts in self.universe.trajectory[start:stop:step]:
frame = ts.frame
# use pre-allocated distance array to save a little bit of time
MDAnalysis.lib.distances.distance_array(A.coordinates(),
B.coordinates(),
result=self.d)
self.qarray(self.d, out=self.q)
n1, q1 = self.qN(self.q, out=self._qtmp)
self.qavg += self.q
if store:
records.append((frame, q1, n1))
out.write("{frame:4d} {q1:8.6f} {n1:5d}\n".format(**vars()))
if store:
self.timeseries = np.array(records).T
n_frames = len(np.arange(
self.universe.trajectory.n_frames)[start:stop:step])
if n_frames > 0:
self.qavg /= n_frames
else:
logger.warn("No frames were analyzed. "
"Check values of start, stop, step.")
logger.debug("start={start} stop={stop} step={step}".format(**vars()))
np.savetxt(self.outarray, self.qavg, fmt="%8.6f")
return self.output
def qarray(self, d, out=None):
"""Return distance array with True for contacts.
*d* is the matrix of distances. The method uses the value of
:attr:`ContactAnalysis1.radius` to determine if a ``distance < radius``
is considered a contact.
If *out* is supplied as a pre-allocated array of the correct
shape then it is filled instead of allocating a new one in
order to increase performance.
This method is typically only used internally.
"""
if out is None:
out = (d <= self.radius)
else:
out[:] = (d <= self.radius)
return out
def qN(self, q, out=None):
"""Calculate native contacts relative to reference state.
*q* is the matrix of contacts (e.g. :attr:`~ContactAnalysis1.q`).
If *out* is supplied as a pre-allocated array of the correct
shape then it is filled instead of allocating a new one in
order to increase performance.
This method is typically only used internally.
"""
if out is None:
out = np.logical_and(q, self.qref)
else:
np.logical_and(q, self.qref, out)
contacts = out.sum()
return contacts, float(contacts) / self.nref
def load(self, filename):
"""Load the data file."""
records = []
with openany(filename) as data:
for line in data:
if line.startswith('#'):
continue
records.append(list(map(float, line.split())))
self.timeseries = np.array(records).T
try:
self.qavg = np.loadtxt(self.outarray)
except IOError as err:
if err.errno != errno.ENOENT:
raise
def plot(self, filename=None, **kwargs):
"""Plot q(t).
If `filename` is supplied then the figure is also written to file (the
suffix determines the file type, e.g. pdf, png, eps, ...). All other
keyword arguments are passed on to :func:`pylab.plot`.
"""
from pylab import plot, xlabel, ylabel, savefig
kwargs.setdefault('color', 'black')
kwargs.setdefault('linewidth', 2)
if self.timeseries is None:
raise ValueError("No timeseries data; "
"do 'ContactAnalysis.run(store=True)' first.")
t = self.timeseries
plot(t[0], t[1], **kwargs)
xlabel(r"frame number $t$")
ylabel(r"native contacts $q_1$")
if filename is not None:
savefig(filename)
def _plot_qavg_pcolor(self, filename=None, **kwargs):
"""Plot :attr:`ContactAnalysis1.qavg`, the matrix of average native
contacts."""
from pylab import (pcolor, gca, meshgrid, xlabel, ylabel, xlim, ylim,
colorbar, savefig)
x, y = self.selections[0].resids, self.selections[1].resids
X, Y = meshgrid(x, y)
pcolor(X, Y, self.qavg.T, **kwargs)
gca().set_aspect('equal')
xlim(min(x), max(x))
ylim(min(y), max(y))
xlabel("residues")
ylabel("residues")
colorbar()
if filename is not None:
savefig(filename)
def plot_qavg(self, filename=None, **kwargs):
"""Plot :attr:`ContactAnalysis1.qavg`, the matrix of average native contacts.
If *filename* is supplied then the figure is also written to file (the
suffix determines the file type, e.g. pdf, png, eps, ...). All other
keyword arguments are passed on to :func:`pylab.imshow`.
"""
from pylab import (imshow, xlabel, ylabel, xlim, ylim, colorbar, cm,
clf, savefig)
x, y = self.selections[0].resids, self.selections[1].resids
kwargs['origin'] = 'lower'
kwargs.setdefault('aspect', 'equal')
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('vmin', 0)
kwargs.setdefault('vmax', 1)
kwargs.setdefault('cmap', cm.hot)
kwargs.setdefault('extent', (min(x), max(x), min(y), max(y)))
clf()
imshow(self.qavg.T, **kwargs)
xlim(min(x), max(x))
ylim(min(y), max(y))
xlabel("residue from {0!r}".format(self.selection_strings[0]))
ylabel("residue from {0!r}".format(self.selection_strings[1]))
colorbar()
if filename is not None:
savefig(filename)
| gpl-2.0 |
fivejjs/AD3 | python/example_grid.py | 2 | 4958 | import itertools
import numpy as np
import matplotlib.pyplot as plt
import pdb
import ad3.factor_graph as fg
grid_size = 20
num_states = 5
factor_graph = fg.PFactorGraph()
multi_variables = []
random_grid = np.random.uniform(size=(grid_size, grid_size, num_states))
for i in xrange(grid_size):
multi_variables.append([])
for j in xrange(grid_size):
new_variable = factor_graph.create_multi_variable(num_states)
for state in xrange(num_states):
new_variable.set_log_potential(state, random_grid[i, j, state])
multi_variables[i].append(new_variable)
alpha = .3
potts_matrix = alpha * np.eye(num_states)
potts_potentials = potts_matrix.ravel().tolist()
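# Illustrative sketch (smaller case, num_states = 3) of what the flattened
# Potts potentials passed to create_factor_dense look like:
#     alpha * np.eye(3)   -> [[.3, 0., 0.],
#                             [0., .3, 0.],
#                             [0., 0., .3]]
#     .ravel().tolist()   -> [.3, 0., 0., 0., .3, 0., 0., 0., .3]
# i.e. equal neighbouring states receive a log-potential bonus of alpha and
# all other state pairs receive 0.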
for i, j in itertools.product(xrange(grid_size), repeat=2):
if (j > 0):
# horizontal edge
edge_variables = [multi_variables[i][j - 1], multi_variables[i][j]]
factor_graph.create_factor_dense(edge_variables, potts_potentials)
if (i > 0):
# vertical edge
edge_variables = [multi_variables[i - 1][j], multi_variables[i][j]]
factor_graph.create_factor_dense(edge_variables, potts_potentials)
factor_graph.set_eta_ad3(.1)
factor_graph.adapt_eta_ad3(True)
factor_graph.set_max_iterations_ad3(1000)
value, marginals, edge_marginals, solver_status =\
factor_graph.solve_lp_map_ad3()
res = np.array(marginals).reshape(20, 20, 5)
plt.matshow(np.argmax(random_grid, axis=-1), vmin=0, vmax=4)
plt.matshow(np.argmax(res, axis=-1), vmin=0, vmax=4)
plt.show()
use_sequence_factors = True
if use_sequence_factors:
# Now do the same with sequence factors.
# Create a factor graph using sequence-factors which is equivalent to the
# previous one.
sequential_factor_graph = fg.PFactorGraph()
# Create a binary variable for each state at each position in the grid.
binary_variables = []
for i in xrange(grid_size):
binary_variables.append([])
for j in xrange(grid_size):
binary_variables[i].append([])
for k in xrange(num_states):
# Assign a random log-potential to each state.
state_variable = sequential_factor_graph.create_binary_variable()
log_potential = multi_variables[i][j].get_log_potential(k)
state_variable.set_log_potential(log_potential)
binary_variables[i][j].append(state_variable)
# Design the edge log-potentials.
# Right now they are diagonal and favoring smooth configurations, but
# that needs not be the case.
additional_log_potentials = []
for i in xrange(grid_size+1):
if i == 0:
num_previous_states = 1
else:
num_previous_states = num_states
if i == grid_size:
num_current_states = 1
else:
num_current_states = num_states
for k in xrange(num_previous_states):
for l in xrange(num_current_states):
if k == l and i != 0 and i != grid_size:
additional_log_potentials.append(alpha)
else:
additional_log_potentials.append(0.0)
# Create a sequential factor for each row in the grid.
# NOTE: need to keep a list of factors, otherwise the Python garbage
# collector will destroy the factor objects...
factors = []
for i in xrange(grid_size):
variables = []
num_states = []
for j in xrange(grid_size):
variables.extend(binary_variables[i][j])
num_states.append(len(binary_variables[i][j]))
factor = fg.PFactorSequence()
# Set True below to let the factor graph own the factor so that we
# don't need to delete it.
sequential_factor_graph.declare_factor(factor, variables, False)
factor.initialize(num_states)
factor.set_additional_log_potentials(additional_log_potentials)
factors.append(factor)
# Create a sequential factor for each column in the grid.
for j in xrange(grid_size):
variables = []
num_states = []
for i in xrange(grid_size):
variables.extend(binary_variables[i][j])
num_states.append(len(binary_variables[i][j]))
factor = fg.PFactorSequence()
# Set True below to let the factor graph own the factor so that we
# don't need to delete it.
sequential_factor_graph.declare_factor(factor, variables, False)
factor.initialize(num_states)
factor.set_additional_log_potentials(additional_log_potentials)
factors.append(factor)
sequential_factor_graph.set_eta_ad3(.1)
sequential_factor_graph.adapt_eta_ad3(True)
sequential_factor_graph.set_max_iterations_ad3(1000)
value, marginals, edge_marginals, solver_status = sequential_factor_graph.solve_lp_map_ad3()
res = np.array(marginals).reshape(20, 20, 5)
plt.matshow(np.argmax(res, axis=-1), vmin=0, vmax=4)
plt.show()
| lgpl-3.0 |
sebastic/QGIS | python/plugins/processing/algs/qgis/MeanAndStdDevPlot.py | 19 | 3553 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
MEAN_FIELD = 'MEAN_FIELD'
STDDEV_FIELD = 'STDDEV_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Mean and standard deviation plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT,
ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD,
self.tr('Mean field'), self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD,
self.tr('StdDev field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, meanfieldname, stddevfieldname)
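# values maps each requested field name to the list of its column values:
# values[namefieldname] holds the category labels, while the mean and stddev
# fields supply the bar heights and the error bar magnitudes below.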
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width, color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'),
)
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
| gpl-2.0 |
466152112/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three example classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
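# Sanity check (illustrative): with soft voting, the ensemble probability for
# sample 1 should equal the weighted average of the individual classifiers'
# probabilities, using the same weights [1, 1, 5] in estimator order.
weights = np.array([1, 1, 5])
manual_class1_1 = np.dot(weights, class1_1[:-1]) / weights.sum()
# manual_class1_1 is expected to match class1_1[-1] up to floating point error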
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
mortonjt/scipy | scipy/signal/spectral.py | 5 | 13830 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = %d is greater than x.shape[%d] = %d, using '
                      'nperseg = x.shape[%d]'
                      % (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
# numpy 1.5.1 doesn't have result_type.
outdtype = (np.array([x[0]]) * np.array([1], 'f')).dtype.char.lower()
if win.dtype != outdtype:
win = win.astype(outdtype)
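    # 'density' scaling divides by fs times the window's energy, yielding a
    # PSD in V**2/Hz; 'spectrum' divides by the squared window sum, yielding
    # power per bin in V**2.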
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not detrend:
detrend_func = lambda seg: seg
elif not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
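                # The periodogram of each segment is folded into a running
                # mean: after iteration k, Pxx averages the first k+1 segments.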
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
| bsd-3-clause |
treycausey/scikit-learn | sklearn/neighbors/base.py | 1 | 23445 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import safe_asarray, atleast2d_or_csr, check_arrays
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
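        # Zero distances give infinite weights; the divide-by-zero warning is
        # deliberately silenced so exact matches end up dominating the vote.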
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, **kwargs):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_kwds = kwargs
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric in ['wminkowski', 'minkowski']:
self.metric_kwds['p'] = p
            if p < 1:
                raise ValueError("p must be greater than or equal to one "
                                 "for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
self.effective_metric_ = self.metric
self.effective_metric_kwds_ = self.metric_kwds
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
self.effective_metric_kwds_ = self.metric_kwds.copy()
p = self.effective_metric_kwds_.pop('p', 2)
            if p < 1:
                raise ValueError("p must be greater than or equal to one "
                                 "for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_kwds_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = safe_asarray(X)
if X.ndim != 2:
raise ValueError("data type not understood")
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.tocsr()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
# XXX: should be implemented with a partial sort
neigh_ind = dist.argsort(axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
if return_distance:
j = np.arange(neigh_ind.shape[0])[:, None]
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = safe_asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
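            # Query one extra neighbor and drop the first column: when X is
            # the fitted data, each sample's nearest neighbor is itself at
            # distance zero.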
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = safe_asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
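        # Encode the labels of each output column as integer indices into the
        # corresponding entry of classes_.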
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
altairpearl/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 53 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="mean_squared_error", cv=10)
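    # The "mean_squared_error" scorer follows the "greater is better"
    # convention and returns negated MSE values, hence the sign flip when the
    # score is reported in the plot title below.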
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
    plt.title("Degree {}\nMSE = {:.2e} (+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
bmcfee/mir_eval | setup.py | 1 | 1087 | from setuptools import setup
with open('README.rst') as file:
long_description = file.read()
setup(
name='mir_eval',
version='0.4',
description='Common metrics for common audio/music processing tasks.',
author='Colin Raffel',
author_email='[email protected]',
url='https://github.com/craffel/mir_eval',
packages=['mir_eval'],
long_description=long_description,
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
'Development Status :: 5 - Production/Stable',
"Intended Audience :: Developers",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
],
keywords='audio music mir dsp',
license='MIT',
install_requires=[
'numpy >= 1.7.0',
'scipy >= 0.14.0',
'future',
'six'
],
extras_require={
'display': ['matplotlib>=1.5.0',
'scipy>=0.16.0'],
'testing': ['matplotlib>=2.1.0']
}
)
| mit |
anand-c-goog/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 3 | 2595 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.classifier import Classifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.head import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.head import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestEstimator
from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestLossMonitor
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
schreiberx/sweet | benchmarks_sphere/paper_jrn_parco_rexi_nonlinear/test_ln2_convergence/postprocessing_consolidate.py | 1 | 2722 | #! /usr/bin/env python3
import sys
import math
from mule_local.JobMule import *
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
groups = ['runtime.timestepping_method', 'runtime.max_simulation_time']
tagnames_y = [
'sphere_data_diff_prog_h.norm_l1',
'sphere_data_diff_prog_h.norm_linf',
]
j = JobsData('./job_bench_*', verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(key)
# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
x = float(x)
y = float(y)
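    # Filtering is currently disabled by the unconditional return below; the
    # NaN and magnitude checks that follow are kept for reference but are
    # never reached.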
return False
# Filter out NaNs for wallclock time studies
# NaNs require significantly more computation time
if math.isnan(y):
return True
if y > 10.0:
return True
return False
for tagname_y in tagnames_y:
print("*"*80)
print("Processing tagname "+tagname_y)
print("*"*80)
params = [
{
'tagname_x': 'runtime.timestep_size',
'xlabel': "Timestep size",
'ylabel': tagname_y,
'title': 'Timestep size vs. error',
'xscale': 'log',
'yscale': 'log',
},
]
"""
params += [
{
'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
'xlabel': "Wallclock time (seconds)",
'ylabel': tagname_y,
'title': 'Wallclock time vs. error',
'xscale': 'log',
'yscale': 'log',
},
]
"""
for param in params:
tagname_x = param['tagname_x']
xlabel = param['xlabel']
ylabel = param['ylabel']
title = param['title']
xscale = param['xscale']
yscale = param['yscale']
print("*"*80)
print("Processing tag "+tagname_x)
print("*"*80)
#if True:
if False:
"""
Table format
"""
d = JobsData_GroupsDataTable(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
fileid = "output_table_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')
print("Data table:")
d.print()
#d.write(fileid+".csv")
if True:
"""
Plotting format
"""
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
for key, group_data in d.get_data_float().items():
print("Group: "+key)
prev_value = -1.0
conv = '-'
for (x, y) in zip(group_data['x_values'], group_data['y_values']):
if prev_value >= 0:
conv = y/prev_value
print("\t"+str(x)+"\t=>\t"+str(y)+"\tconvergence: "+str(conv))
prev_value = y
print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| mit |
shadmanj/college-code | BME301-Bioelectricity/hodgkin-huxley-solver-PYTHON/2nd-order-DE-solver.py | 1 | 3308 | #Shadman Jubaer
#BME 301
#HW 2
#These functions will numerically solve a second order differential
#equation using either Euler or Runge-Kutta numerical solving methods.
#------------------------------------------------------------
import math as m
import matplotlib.pyplot as plt
#Generates interval over which to solve numerically
#Interval created between a start and stop value with
#a given step-size.
def irange(start,stop,step):
while start <= stop:
yield start
start += step
#Solves 2nd Order ODE using Euler
#Input arguments:
#ode_u1 = y'(t)
#ode_u2 = y''(t)
#interval: x domain over which to solve
#initial_value_u1: initial y value
#initial value_u2: initial y' value
#Outputs:
#y: numerically solved y(t) values
#yp: numerically solved y'(t) values
def euler(ode_u1,ode_u2,interval,step_size,initial_value_u1,initial_value_u2):
u1 = initial_value_u1
u2 = initial_value_u2
h = step_size
y = []
yp = []
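    # Note: u1 is advanced using the already-updated u2, so this is the
    # semi-implicit (symplectic) Euler variant rather than plain forward Euler.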
for i in interval:
u2 = u2 + h*ode_u2(i,u1,u2)
yp.append(u2)
u1 = u1 + h*ode_u1(i,u1,u2)
y.append(u1)
return y, yp
#Solves 2nd Order ODE using Runge-Kutta
#Input arguments:
#ode_u1 = y'(t)
#ode_u2 = y''(t)
#interval: x domain over which to solve
#initial_value_u1: initial y value
#initial value_u2: initial y' value
#Outputs:
#y: numerically solved y(t) values
#yp: numerically solved y'(t) values
def rk(ode_u1,ode_u2,interval,step_size,initial_value_u1,initial_value_u2):
u1 = initial_value_u1
u2 = initial_value_u2
h = step_size
y = []
yp = []
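    # Note: the k2-k4 stages below reuse the unmodified (u1, u2) state at
    # shifted times, so this matches classical RK4 only when the right-hand
    # side depends on t alone (as in the sin(t) test case further down).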
for i in interval:
k1p = ode_u2(i,u1,u2)
k2p = ode_u2(i+(h/2),u1,u2)
k3p = ode_u2(i+(h/2),u1,u2)
k4p = ode_u2(i+(h),u1,u2)
u2 = u2 + h*((k1p + k2p*2 + k3p*2 + k4p)/6)
yp.append(u2)
k1 = ode_u1(i,u1,u2)
k2 = ode_u1(i+(h/2),u1,u2)
k3 = ode_u1(i+(h/2),u1,u2)
k4 = ode_u1(i+(h),u1,u2)
u1 = u1 + h*((k1 + k2*2 + k3*2 + k4)/6)
y.append(u1)
return y, yp
#----------------------- TEST FUNCTIONS ---------------------
'''
#Returns y'
def u1(t,u1,u2):
u1_prime = u2
return u1_prime
#y'' = -sin(t)
def u2(t,u1,u2):
u2_prime = -m.sin(t)
return u2_prime
#------------------- -------------------------
#------------------- TEST -------------------------
#------------------- -------------------------
start = 0
stop = 50*m.pi
N = 1000
h = (stop-start)/(N-1)
x_hw = list(i for i in irange(start,stop,h))
y_initial = m.sin(start)
y_prime_initial = m.cos(start)
ans_euler, ans_eulerp = euler(u1,u2,x_hw, h, y_initial, y_prime_initial)
y_analitical = [m.sin(step) for step in x_hw]
y_analitical_p = [m.cos(step) for step in x_hw]
ans_rk, ans_rkp = rk(u1,u2,x_hw, h, y_initial, y_prime_initial)
#--------------------------------------------------------------
t = x_hw
plt.plot(t, ans_euler, 'blue', t, ans_rk, 'red',t, y_analitical,'green')
plt.show()
#--------------------------------------------------------------
'''
| mit |
bikong2/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
MKLab-ITI/news-popularity-prediction | news_popularity_prediction/learning/ranking.py | 1 | 12176 | __author__ = 'Georgios Rizos ([email protected])'
import os
import numpy as np
from scipy.stats import kendalltau
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.cross_validation import KFold
from news_popularity_prediction.datautil.common import get_threads_number
from news_popularity_prediction.datautil.feature_rw import h5_open, h5_close, h5load_from, h5store_at,\
load_sklearn_model, store_sklearn_model
def random_ranking(y_train, X_test):
pass
def initialize_k_evaluation_measures(number_of_k,
number_of_folds,
number_of_features):
kendall_tau_score_array = np.empty((number_of_k, number_of_folds), dtype=np.float64)
p_value_array = np.empty((number_of_k, number_of_folds), dtype=np.float64)
mean_square_error = np.empty((number_of_k, number_of_folds), dtype=np.float64)
top_k_jaccard = np.empty((number_of_k, number_of_folds), dtype=np.float64)
feature_importances_array = np.empty([number_of_k, number_of_folds, number_of_features], dtype=np.float64)
k_evaluation_measures = [kendall_tau_score_array,
p_value_array,
mean_square_error,
top_k_jaccard,
feature_importances_array]
return k_evaluation_measures
def update_k_evaluation_measures(k_evaluation_measures,
k_index,
evaluation_measure_arrays):
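    # Measure order in both arguments: 0 = Kendall's tau, 1 = p-value,
    # 2 = MSE, 3 = top-k Jaccard, 4 = feature importances.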
k_evaluation_measures[0][k_index, :] = evaluation_measure_arrays[0]
k_evaluation_measures[1][k_index, :] = evaluation_measure_arrays[1]
k_evaluation_measures[2][k_index, :] = evaluation_measure_arrays[2]
k_evaluation_measures[3][k_index, :] = evaluation_measure_arrays[3]
try:
k_evaluation_measures[4][k_index, :, :] = np.squeeze(evaluation_measure_arrays[4])
except ValueError:
k_evaluation_measures[4][k_index, :, :] = evaluation_measure_arrays[4]
print("Kendall's tau: ", np.mean(evaluation_measure_arrays[0]))
print("Mean MSE: ", np.mean(evaluation_measure_arrays[2]))
print("Top-100 Jaccard: ", np.mean(evaluation_measure_arrays[3]))
def store_k_evaluation_measures(store_path,
k_list,
k_evaluation_measures,
feature_column_names):
number_of_folds = k_evaluation_measures[0].shape[1]
h5_store = h5_open(store_path + "results.h5")
for fold_index in range(number_of_folds):
data_frame = pd.DataFrame(k_evaluation_measures[0][:, fold_index], columns=["kendall_tau"], index=k_list)
h5store_at(h5_store,
"/data/kendall_tau/fold" + str(fold_index),
data_frame)
data_frame = pd.DataFrame(k_evaluation_measures[1][:, fold_index], columns=["p_value"], index=k_list)
h5store_at(h5_store,
"/data/p_value/fold" + str(fold_index),
data_frame)
data_frame = pd.DataFrame(k_evaluation_measures[2][:, fold_index], columns=["mse"], index=k_list)
h5store_at(h5_store,
"/data/mse/fold" + str(fold_index),
data_frame)
data_frame = pd.DataFrame(k_evaluation_measures[3][:, fold_index], columns=["jaccard"], index=k_list)
h5store_at(h5_store,
"/data/top_k_jaccard/fold" + str(fold_index),
data_frame)
data_frame = pd.DataFrame(k_evaluation_measures[4][:, fold_index, :], columns=feature_column_names, index=k_list)
h5store_at(h5_store,
"/data/feature_importances/fold" + str(fold_index),
data_frame)
h5_close(h5_store)
def load_k_evaluation_measures(store_path,
number_of_folds=10):
h5_store = h5_open(store_path + "results.h5")
kendall_tau_keys = ["/data/" + "kendall_tau/fold" + str(fold_index) for fold_index in range(number_of_folds)]
p_value_keys = ["/data/" + "p_value/fold" + str(fold_index) for fold_index in range(number_of_folds)]
mse_keys = ["/data/" + "mse/fold" + str(fold_index) for fold_index in range(number_of_folds)]
jaccard_keys = ["/data/" + "top_k_jaccard/fold" + str(fold_index) for fold_index in range(number_of_folds)]
feature_importances_keys = ["/data/" + "feature_importances/fold" + str(fold_index) for fold_index in range(number_of_folds)]
if (len(kendall_tau_keys) != len(p_value_keys)) or\
(len(kendall_tau_keys) != len(feature_importances_keys)):
print("Fold number different for evaluation measures load.")
raise RuntimeError
number_of_folds = len(feature_importances_keys)
data_frame = h5load_from(h5_store, feature_importances_keys[0])
k_list = data_frame.index
number_of_samples = k_list.size
feature_names_list = data_frame.columns
number_of_features = len(feature_names_list)
kendall_tau_array = np.empty((number_of_samples,
number_of_folds),
dtype=np.float64)
p_value_array = np.empty((number_of_samples,
number_of_folds),
dtype=np.float64)
mean_square_error = np.empty((number_of_samples,
number_of_folds), dtype=np.float64)
top_k_jaccard = np.empty((number_of_samples,
number_of_folds), dtype=np.float64)
feature_importances_array = np.empty((number_of_samples,
number_of_folds,
number_of_features),
dtype=np.float64)
for f in range(number_of_folds):
kendall_tau_key = kendall_tau_keys[f]
p_value_key = p_value_keys[f]
mse_key = mse_keys[f]
jaccard_key = jaccard_keys[f]
feature_importances_key = feature_importances_keys[f]
kendall_tau_data_frame = h5load_from(h5_store, kendall_tau_key)
p_value_data_frame = h5load_from(h5_store, p_value_key)
mse_data_frame = h5load_from(h5_store, mse_key)
jaccard_data_frame = h5load_from(h5_store, jaccard_key)
feature_importances_data_frame = h5load_from(h5_store, feature_importances_key)
kendall_tau_array[:, f] = np.squeeze(kendall_tau_data_frame.values)
p_value_array[:, f] = np.squeeze(p_value_data_frame.values)
mean_square_error[:, f] = np.squeeze(mse_data_frame.values)
top_k_jaccard[:, f] = np.squeeze(jaccard_data_frame.values)
try:
feature_importances_array[:, f, :] = np.squeeze(feature_importances_data_frame.values)
except ValueError:
feature_importances_array[:, f, :] = feature_importances_data_frame.values
k_evaluation_measures = (kendall_tau_array,
p_value_array,
mean_square_error,
top_k_jaccard,
feature_importances_array)
return k_list, k_evaluation_measures, feature_names_list
def form_ground_truth(y_raw):
return y_raw, y_raw
def initialize_evaluation_measure_arrays(number_of_folds,
number_of_features):
kendall_tau_score_array = np.empty(number_of_folds,
dtype=np.float64)
p_value_array = np.empty(number_of_folds,
dtype=np.float64)
mse_array = np.empty(number_of_folds,
dtype=np.float64)
jaccard_array = np.empty(number_of_folds,
dtype=np.float64)
feature_importances_array = np.empty((number_of_folds,
number_of_features),
dtype=np.float64)
measure_arrays_list = [kendall_tau_score_array,
p_value_array,
mse_array,
jaccard_array,
feature_importances_array]
return measure_arrays_list
def folding(y, n_folds):
k_fold = KFold(y.size, n_folds=n_folds, random_state=0)
return k_fold
def learning_module(file_path,
X_train,
X_test,
y_train,
y_test,
train_test,
baseline=None):
if baseline is None:
regressor_fitted = get_regressor_fitted(file_path,
X_train,
X_test,
y_train,
y_test)
y_pred = regressor_fitted.predict(X_test)
test = train_test[1]
max_pred_index = np.argmax(y_pred)
max_pred_index = test[max_pred_index]
feature_importances = regressor_fitted.feature_importances_
else:
if baseline == "mean":
y_pred = np.ones_like(y_test) * baseline_mean(y_train)
elif baseline == "median":
y_pred = np.ones_like(y_test) * baseline_median(y_train)
else:
print("Invalid baseline method.")
raise RuntimeError
feature_importances = np.empty((1, 0))
    # Evaluate the predicted ranking on the held-out test fold.
kendall_tau_score, p_value = kendalltau(y_test, y_pred)
mse = np.mean(np.power(y_test - y_pred, 2))
top_k_jaccard = top_k_jaccard_score(y_test, y_pred, top_k=100)
ranking_evaluation_tuple = [kendall_tau_score, p_value,
mse, top_k_jaccard,
feature_importances]
return ranking_evaluation_tuple
def baseline_mean(y_train, osn=None):
if osn is None:
return np.mean(y_train)
else:
raise RuntimeError
def baseline_median(y_train, osn=None):
if osn is None:
return np.median(y_train)
else:
raise RuntimeError
def top_k_jaccard_score(x, y, top_k):
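    # Jaccard overlap between the index sets of the top-k entries of x and y.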
l = x.size
x_index = np.argsort(x)
y_index = np.argsort(y)
jaccard = jaccard_index(x_index[l - top_k:l], y_index[l - top_k:l])
return jaccard
def jaccard_index(x, y):
nom = np.intersect1d(x, y).size
denom = np.union1d(x, y).size
return nom/denom
def get_regressor_fitted(file_path,
X_train,
X_test,
y_train,
y_test):
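    # Reuse a previously fitted model cached on disk when available; otherwise
    # fit a random forest and persist it for later runs.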
if os.path.exists(file_path):
try:
regressor_fitted = load_sklearn_model(file_path)
except EOFError as e:
print(file_path)
raise e
else:
regressor = RandomForestRegressor(n_estimators=50,
criterion="mse",
max_features="auto",
n_jobs=get_threads_number())
regressor_fitted = regressor.fit(X_train, y_train)
store_sklearn_model(file_path, regressor_fitted)
return regressor_fitted
def update_evaluation_measure_arrays(evaluation_measure_arrays,
fold_index,
evaluation_tuple):
evaluation_measure_arrays[0][fold_index] = evaluation_tuple[0] # Kendall's tau
evaluation_measure_arrays[1][fold_index] = evaluation_tuple[1] # p-value
evaluation_measure_arrays[2][fold_index] = evaluation_tuple[2]
evaluation_measure_arrays[3][fold_index] = evaluation_tuple[3]
evaluation_measure_arrays[4][fold_index, :] = evaluation_tuple[4] # Feature weights
def is_k_valid(i,
inv_cum_dist_dict,
number_of_folds):
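    # A k value is valid only if, for every key, the union of the last i+1
    # inverse-cumulative-distribution lists contains more than 50 distinct
    # items.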
k_is_valid = True
for r_inv_cum_dist in inv_cum_dist_dict.values():
discrete_r_list = list()
for r_list in r_inv_cum_dist[-i-1:]:
discrete_r_list.extend(r_list)
discrete_r_list = set(discrete_r_list)
        if len(discrete_r_list) <= 50:
k_is_valid = False
break
return k_is_valid
| apache-2.0 |
rs2/pandas | pandas/tests/test_lib.py | 2 | 7969 | import numpy as np
import pytest
from pandas._libs import lib, writers as libwriters
import pandas as pd
from pandas import Index
import pandas._testing as tm
class TestMisc:
def test_max_len_string_array(self):
arr = a = np.array(["foo", "b", np.nan], dtype="object")
assert libwriters.max_len_string_array(arr) == 3
# unicode
arr = a.astype("U").astype(object)
assert libwriters.max_len_string_array(arr) == 3
# bytes for python3
arr = a.astype("S").astype(object)
assert libwriters.max_len_string_array(arr) == 3
# raises
msg = "No matching signature found"
with pytest.raises(TypeError, match=msg):
libwriters.max_len_string_array(arr.astype("U"))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [["p", "a"], ["n", "d"], ["a", "s"]]
gen = (key for key in keys)
expected = np.array(["a", "d", "n", "p", "s"])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(["p", "a", "n", "d", "s"])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
def test_fast_unique_multiple_unsortable_runtimewarning(self):
arr = [np.array(["foo", pd.Timestamp("2000")])]
with tm.assert_produces_warning(RuntimeWarning):
lib.fast_unique_multiple(arr, sort=None)
class TestIndexing:
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
msg = "index 100 is out of bounds for axis (0|1) with size 100"
with pytest.raises(IndexError, match=msg):
target[indices]
with pytest.raises(IndexError, match=msg):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
with pytest.raises(IndexError, match=msg):
target[indices]
with pytest.raises(IndexError, match=msg):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert not isinstance(maybe_slice, slice)
tm.assert_numpy_array_equal(maybe_slice, indices)
tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
assert result.dtype == np.bool_
result = lib.maybe_booleans_to_slice(arr[:0])
assert result == slice(0, 0)
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_cache_readonly_preserve_docstrings():
# GH18197
assert Index.hasnans.__doc__ is not None
| bsd-3-clause |
DouglasLeeTucker/DECam_PGCM | bin/y2a1_rawdata.py | 1 | 66323 | #!/usr/bin/env python
"""
y2a1_rawdata.py
Example:
y2a1_rawdata.py --help
y2a1_rawdata.py --tag Y2A1_FINALCUT --baseName y2a1_rawdata --verbose 2
"""
##################################
def main():
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--baseName', help='base name of the output CSV file', default='rawdata')
parser.add_argument('--fileNamePattern', help='pattern of filenames to be globbed (used by combine_stilts_internal_match_files and reformat_stilts_internal_match_files_for_gcm_exp)', default='y2a1_rawdata.svy1y2y3.sorted.tmp.z.csv.??.inmatch')
parser.add_argument('--outputFileName', help='name of an output file', default='output.csv')
parser.add_argument('--tag', help='PROCTAG tag name', default='Y2A1_FINALCUT')
parser.add_argument('--reqnum', help='REQNUM (used by se_object_query_by_reqnum)', default=2285, type=int)
parser.add_argument('--expnumMin', help='Minimum value for EXPNUM', default=1, type=int)
parser.add_argument('--expnumMax', help='Maximum value for EXPNUM', default=99999999, type=int)
parser.add_argument('--nite', help='nite to query (used by se_object_query_by_nite_and_band)', default='20130113')
parser.add_argument('--bands', help='comma-separated list of filter bands to consider', default='g,r,i,z,Y')
parser.add_argument('--band', help='single filter band to consider (used by se_object_query_by_nite_and_band)', default='u')
parser.add_argument('--cleanup', help='remove less interesting intermediate files', default=False, action='store_true')
parser.add_argument('--do_exposure_query',help='do exposure query', default=False, action='store_true')
parser.add_argument('--do_reqnum_query',help='do reqnum query', default=False, action='store_true')
parser.add_argument('--do_se_object_query_by_reqnum',help='do object query', default=False, action='store_true')
parser.add_argument('--do_se_object_query_by_expnumRange',help='do object query', default=False, action='store_true')
parser.add_argument('--do_reqnum_query_results_to_csv_band_files',help='do convert reqnum_query results to a set of CSV files (one per filter band)', default=False, action='store_true')
parser.add_argument('--do_nite_band_query_results_to_csv_band_files',help='do convert night_band_query results to a set of CSV files (one per filter band)', default=False, action='store_true')
parser.add_argument('--do_add_exp_info',help='do add exposure info to the se_object files', default=False, action='store_true')
parser.add_argument('--do_combine_stilts_internal_match_files',help='do combine STILTS internally matched files', default=False, action='store_true')
parser.add_argument('--do_reformat_stilts_internal_match_files_for_gcm_exp',help='do reformat STILTS internally matched filesfor GCM (exposure-by-exposure fit)', default=False, action='store_true')
parser.add_argument('--do_blacklist_query',help='do blacklist query', default=False, action='store_true')
parser.add_argument('--do_y3a1_blacklist_query',help='do y3a1_blacklist query', default=False, action='store_true')
parser.add_argument('--do_image_query',help='do image query', default=False, action='store_true')
parser.add_argument('--do_exp_image_cat_y3a1blacklist_query',help='do exposure/image/catalog/y3a1_blacklist query and merge', default=False, action='store_true')
parser.add_argument('--do_se_object_query_by_nite_and_band',help='do nite/band objects query', default=False, action='store_true')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
# Checking bandList...
supportedBandList = ['u','g','r','i','z','Y']
bandList = args.bands.strip().split(',')
badBandCount = 0
for band in bandList:
try:
supportedBandIndex = supportedBandList.index(band)
except:
print """Filter band %s not found in list of supported bands...""" % (band)
badBandCount = badBandCount + 1
if badBandCount > 0:
print """The list of supported bands is %s""" % (supportedBandList)
print """Note that the filter band names in this list are case-sensitive."""
print """Exiting now!..."""
print
return 1
# Run exposure query...
if args.do_exposure_query==True:
status = exposure_query(args)
# Run object query by reqnum...
if args.do_se_object_query_by_reqnum==True:
status = se_object_query_by_reqnum(args)
# Run object query by expnum range...
if args.do_se_object_query_by_expnumRange==True:
status = se_object_query_by_expnumRange(args)
# Run object query by nite and band...
if args.do_se_object_query_by_nite_and_band==True:
status = se_object_query_by_nite_and_band(args)
# Run query to find unique list of reqnums for
# a given processing tag and within a given expnum range...
if args.do_reqnum_query==True:
reqnum_query(args)
# Convert results from reqnum_query to a set of
# CSV files (one per filter band)...
if args.do_reqnum_query_results_to_csv_band_files==True:
reqnum_query_results_to_csv_band_files(args)
# Convert results from nite_band_query to a set of
# CSV files (one per filter band)...
if args.do_nite_band_query_results_to_csv_band_files==True:
nite_band_query_results_to_csv_band_files(args)
# Add exposure info to the se_object files...
if args.do_add_exp_info==True:
add_exp_info(args)
# Combine files from STILTS internal match into a
# single large file...
if args.do_combine_stilts_internal_match_files==True:
combine_stilts_internal_match_files(args)
# Reformat files from STILTS internal match into a
# single large file that is ingestible by GCM for
# the exposure-by-exposure fit...
if args.do_reformat_stilts_internal_match_files_for_gcm_exp==True:
reformat_stilts_internal_match_files_for_gcm_exp(args)
# Run blacklist query...
if args.do_blacklist_query==True:
status = blacklist_query(args)
# Run y3a1_blacklist query...
# (this includes Eli Rykoff's spread_model blacklist just for y3a1...)
if args.do_y3a1_blacklist_query==True:
status = y3a1_blacklist_query(args)
# Run image query...
if args.do_image_query==True:
status = image_query(args)
if args.do_exp_image_cat_y3a1blacklist_query==True:
status = exp_image_cat_y3a1blacklist_query(args)
##################################
#
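# Query exposure-level metadata (PROD.EXPOSURE joined to PROD.QA_SUMMARY via the
# processing tag) for the requested EXPNUM range and save it to <baseName>.expinfo.csv.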
def exposure_query(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'exposure_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
print 'TBD...'
connection=ea.connect('desoper')
query="""
select e.expnum, t.unitname, t.reqnum, t.attnum,
e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, e.band,
e.nite, e.mjd_obs, e.field, e.object, e.program,
qa.*
from prod.exposure e, prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
e.expnum=qa.expnum and
t.tag='%s' and
qa.expnum between %d and %d
order by qa.expnum
""" % (args.tag, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFile="""%s.expinfo.csv""" % (args.baseName)
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_orig(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ pfw_attempt_id
from prod.proctag
where tag='Y2A1_FINALCUT'
),
y as (
select /*+materialize */ a.expnum as expnum, max(a.lastchanged_time) as evaltime
    from prod.finalcut_eval a
where a.analyst!='SNQUALITY'
and (upper(a.program)='SURVEY' or upper(a.program)='SUPERNOVA')
group by a.expnum
),
z as (
select /*+materialize */ b.expnum
from prod.finalcut_eval b, y
where b.expnum=y.expnum
and b.lastchanged_time=y.evaltime
and b.accepted='True'
)
select o.ra, o.dec, o.flux_psf, o.fluxerr_psf, o.class_star,
o.spread_model, cast(o.band as VARCHAR(1)) as band,
cast(c.expnum as NUMBER(8)) as expnum,
cast(c.ccdnum as NUMBER(4)) as ccdnum, e.mjd_obs, e.exptime,
e.tradeg, e.tdecdeg, cast(e.ha as varchar(12)) as ha
from prod.se_object o, prod.catalog c, prod.exposure e, x, z
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=z.expnum
and e.expnum=c.expnum
and o.filename=c.filename
and (o.x_image between 100 and 1900)
and (o.y_image between 100 and 3950)
and o.flags = 0
and o.class_star > 0.50
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.100)
and (c.expnum between 383995 and 410586)"""
if args.verbose>0: print query
outputFiles="""%s.objinfo.fits""" % (args.baseName)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_by_reqnum_old(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query_by_reqnum_old'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ pfw_attempt_id
from prod.proctag
where tag='%s' and reqnum=%d
),
y as (
select /*+materialize */ a.expnum as expnum, max(a.lastchanged_time) as evaltime
from prod.finalcut_eval a
where a.analyst!='SNQUALITY'
group by a.expnum
),
z as (
select /*+materialize */ b.expnum
from prod.finalcut_eval b, y
where b.expnum=y.expnum
and b.lastchanged_time=y.evaltime
and b.accepted='True'
)
select c.expnum, c.filename,
cast(o.band as VARCHAR(1)) as BAND,
c.ccdnum as CCDNUM,
o.object_number, o.x_image, o.y_image, o.ra, o.dec,
o.flux_psf, o.fluxerr_psf, o.flux_aper_8, fluxerr_aper_8,
o.class_star, o.spread_model, o.spreaderr_model, o.flags
from prod.se_object o, prod.catalog c, x, z
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=z.expnum
and o.filename=c.filename
and o.flags = 0
and o.class_star > 0.80
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.01)
and (c.expnum between %d and %d)""" % \
(args.tag, args.reqnum, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFiles="""%s.%s.%d-%d.%d.objinfo.fits""" % \
(args.baseName, args.tag, args.expnumMin, args.expnumMax, args.reqnum)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_by_reqnum(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query_by_reqnum'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
tag='%s' and reqnum=%d and
qa.expnum between %d and %d
)
select c.expnum, c.filename,
cast(o.band as VARCHAR(1)) as BAND,
c.ccdnum as CCDNUM,
o.object_number, o.x_image, o.y_image, o.ra, o.dec,
o.flux_psf, o.fluxerr_psf, o.flux_aper_8, fluxerr_aper_8,
o.class_star, o.spread_model, o.spreaderr_model, o.flags
from prod.se_object o, prod.catalog c, x
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=x.expnum
and o.filename=c.filename
and o.flags = 0
and o.class_star > 0.80
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.01)
""" % \
(args.tag, args.reqnum, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFiles="""%s.%s.%d-%d.%d.objinfo.fits""" % \
(args.baseName, args.tag, args.expnumMin, args.expnumMax, args.reqnum)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_by_nite_and_band(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query_by_nite_and_band'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
# Note looser cuts for o.flags and "magerr_psf"...
query="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum, e.band
from prod.proctag t, prod.qa_summary qa, prod.exposure e
where t.pfw_attempt_id=qa.pfw_attempt_id and
qa.expnum=e.expnum and
t.tag='%s' and
e.nite='%s' and
e.band='%s'
)
select c.expnum, c.filename,
x.band,
c.ccdnum as CCDNUM,
o.object_number, o.x_image, o.y_image, o.ra, o.dec,
o.flux_psf, o.fluxerr_psf, o.flux_aper_8, o.fluxerr_aper_8,
o.class_star, o.spread_model, o.spreaderr_model, o.flags
from prod.se_object o, prod.catalog c, x
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=x.expnum
and o.filename=c.filename
and o.flags < 3
and o.class_star > 0.80
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.03)
""" % \
(args.tag, args.nite, args.band)
if args.verbose>0: print query
outputFiles="""%s.%s.%s.%s.objinfo.fits""" % \
(args.baseName, args.tag, args.nite, args.band)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_by_expnumRange_old(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ pfw_attempt_id
from prod.proctag
where tag='%s'
),
y as (
select /*+materialize */ a.expnum as expnum, max(a.lastchanged_time) as evaltime
from prod.finalcut_eval a
where a.analyst!='SNQUALITY'
group by a.expnum
),
z as (
select /*+materialize */ b.expnum
from prod.finalcut_eval b, y
where b.expnum=y.expnum
and b.lastchanged_time=y.evaltime
and b.accepted='True'
)
select c.expnum, c.filename,
cast(o.band as VARCHAR(1)) as BAND,
c.ccdnum as CCDNUM,
o.object_number, o.x_image, o.y_image, o.ra, o.dec,
o.flux_psf, o.fluxerr_psf, o.flux_aper_8, fluxerr_aper_8,
o.class_star, o.spread_model, o.spreaderr_model, o.flags
from prod.se_object o, prod.catalog c, x, z
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=z.expnum
and o.filename=c.filename
and o.flags = 0
and o.class_star > 0.80
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.01)
and (c.expnum between %d and %d)""" % \
(args.tag, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFiles="""%s.%s.exp%d-%d.objinfo.fits""" % \
(args.baseName, args.tag, args.expnumMin, args.expnumMax)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
##################################
#
# Based on query from Eli Rykoff and Dave Burke, which
# in turn is based on a query from Robert Gruendl.
def se_object_query_by_expnumRange(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y2a1_se_object_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
tag='%s' and
qa.expnum between %d and %d
)
select c.expnum, c.filename,
cast(o.band as VARCHAR(1)) as BAND,
c.ccdnum as CCDNUM,
o.object_number, o.x_image, o.y_image, o.ra, o.dec,
o.flux_psf, o.fluxerr_psf, o.flux_aper_8, fluxerr_aper_8,
o.class_star, o.spread_model, o.spreaderr_model, o.flags
from prod.se_object o, prod.catalog c, x
where x.pfw_attempt_id=c.pfw_attempt_id
and c.filetype='cat_finalcut'
and c.expnum=x.expnum
and o.filename=c.filename
and o.flags = 0
and o.class_star > 0.80
and o.spread_model < 0.01
and ((1.086*o.fluxerr_psf/o.flux_psf) between 0.001 and 0.01)
""" % \
(args.tag, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFiles="""%s.%s.exp%d-%d.objinfo.fits""" % \
(args.baseName, args.tag, args.expnumMin, args.expnumMax)
connection.query_and_save(query,outputFiles)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Robert Gruendl.
def reqnum_query_old(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reqnum_query_old'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ pfw_attempt_id, unitname, reqnum, attnum
from prod.proctag
where tag='%s'
),
y as (
select /*+materialize */ a.expnum as expnum, max(a.lastchanged_time) as evaltime
from prod.finalcut_eval a
where a.analyst!='SNQUALITY'
group by a.expnum
),
z as (
select /*+materialize */ b.expnum, b.unitname, b.reqnum, b.attnum
from prod.finalcut_eval b, y
where b.expnum=y.expnum and
b.lastchanged_time=y.evaltime and
b.accepted='True'
)
select distinct x.reqnum
from x, z
where z.unitname=x.unitname and
z.reqnum=x.reqnum and
z.attnum=x.attnum and
z.expnum between %d and %d
order by x.reqnum
""" % (args.tag, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFile="""%s.reqnums.csv""" % (args.baseName)
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on query from Robert Gruendl.
def reqnum_query(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reqnum_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
select distinct (t.reqnum)
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
t.tag='%s' and
qa.expnum between %d and %d
order by t.reqnum
""" % (args.tag, args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFile="""%s.reqnums.csv""" % (args.baseName)
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
# reqnum_query_results_to_csv_band_files_old
#
# Based on sepBands from y2a1_tertiaries.py
#
def reqnum_query_results_to_csv_band_files_old(args):
import os
import sys
import glob
from astropy.io import fits
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reqnum_query_results_to_csv_band_files_old'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# This pattern matching will need to be made more general
# (maybe instead pass a list of files instead of trying to
# pattern match?)...
pattern = """%s.%s.??????-??????.????.objinfo.fits""" % \
(args.baseName, args.tag)
fileNameList = sorted(glob.glob(pattern))
if args.verbose>1: print fileNameList
# Open up a CSV output file for each filter band...
bandList = args.bands.strip().split(',')
fout = {}
for band in bandList:
outputFile = """%s.objinfo.%s.csv""" % (args.baseName, band)
fout[band] = open(outputFile,'w')
# Read header from the "zeroth" input file from fileNameList
hdulist = fits.open(fileNameList[0])
columnNamesList = hdulist[1].columns.names
hdulist.close()
# Check if there is column called 'BAND'...
try:
bandCol = columnNamesList.index('BAND')
except:
print """Could not find 'BAND' in header of %s...""" % (fileNameList[0])
print """Exiting 'reqnum_query_results_to_csv_band_files' method with return code 1 now..."""
return 1
# Create a CSV header line from columnNamesList...
hout = ''
for columnName in columnNamesList:
hout = hout+columnName+','
hout = hout[:-1]
hout = hout+'\n'
# Write header to each of the output files (one per filter band)...
for band in bandList:
fout[band].write(hout)
# Loop through each of the input files from fileNameList...
invalidBandList = []
for fileName in fileNameList:
print fileName
# Open the input file...
hdulist = fits.open(fileName)
tbdata = hdulist[1].data
for linecnt in range(tbdata.size):
if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+fileName+'): ',linecnt,
sys.stdout.flush()
lins = tbdata[linecnt]
#band = tbdata['BAND'][linecnt]
band = lins[bandCol]
if band in bandList:
# Create a CSV data line from lins...
lin = ''
for colcnt in range(len(lins)):
lin = lin+str(lins[colcnt])+','
lin = lin[:-1]
lin = lin+'\n'
# Output CSV data line to appropriate output file...
fout[band].write(lin)
elif band not in invalidBandList:
invalidBandList.append(band)
print 'Unrecognized band: '+band
# Close this input file...
hdulist.close()
if args.verbose > 1:
print
# Close the output file for each filter band...
for band in bandList:
        fout[band].close()
if args.verbose>0: print
return 0
##################################
# reqnum_query_results_to_csv_band_files
#
# Based on sepBands from y2a1_tertiaries.py
# Uses STILTS for quick conversion of FITS to CSV of input files.
#
def reqnum_query_results_to_csv_band_files(args):
import os
import sys
import datetime
import glob
from astropy.io import fits
stiltsDir='/usrdevel/dp0/dtucker/STILTS3.0/latest'
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reqnum_query_results_to_csv_band_files'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# This pattern matching will need to be made more general
# (maybe instead pass a list of files instead of trying to
# pattern match?)...
#pattern = """%s.%s.??????-??????.????.objinfo.fits""" % \
pattern = """%s.%s.??????-??????.????.objinfo*.fits""" % \
(args.baseName, args.tag)
fileNameList = sorted(glob.glob(pattern))
if args.verbose>1: print fileNameList
# Convert each FITS file to CSV format via the STILTS tcopy command...
print 'Converting files from FITS to CSV format...'
for fileName in fileNameList:
print fileName
inputFile = fileName
outputFile = """%s.csv""" % (inputFile)
cmd = """%s/stilts -Xmx8000m tcopy in=%s ifmt=fits out=%s ofmt=csv""" % (stiltsDir,inputFile,outputFile)
print datetime.datetime.now()
print """Running: %s""" % (cmd)
status = os.system(cmd)
print datetime.datetime.now()
print
if status !=0:
print """%s failed.""" % (cmd)
return 1
# Open up a CSV output file for each filter band...
bandList = args.bands.strip().split(',')
fout = {}
for band in bandList:
outputFile = """%s.objinfo.%s.csv""" % (args.baseName, band)
fout[band] = open(outputFile,'w')
# Read header from the "zeroth" input file from fileNameList
# (well, actually from its equivalent CSV file)...
csvFileName = """%s.csv""" % (fileNameList[0])
fin = open(csvFileName)
hin = fin.readline()
fin.close()
# Identify which column contains the filter band...
hins=hin.upper().strip().split(',')
try:
bandCol = hins.index('BAND')
except:
print """Could not find 'BAND' in header of %s...""" % (fileNameList[0])
print """Exiting 'reqnum_query_results_to_csv_band_files' method with return code 1 now..."""
return 1
# Write header to each of the output files (one per filter band)...
for band in bandList:
fout[band].write(hin)
# Loop through each of the input files from fileNameList...
invalidBandList = []
for fileName in fileNameList:
csvFileName = """%s.csv""" % (fileName)
print csvFileName
# Open the input file...
with open(csvFileName,'r') as fin:
# Skip header line...
try:
next(fin)
except:
print """%s is empty. Skipping...""" % (csvFileName)
continue
print
linecnt = 0
for lin in fin:
# Increment line count...
linecnt += 1
if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+csvFileName+'): ',linecnt,
sys.stdout.flush()
lins=lin.strip().split(',')
band = lins[bandCol]
if band in bandList:
fout[band].write(lin)
else:
print 'Unrecognized band: '+band
if args.verbose > 1:
print
# Close the output file for each filter band...
for band in bandList:
        fout[band].close()
# If the "--cleanup" option was used, remove the csv versions
# of the original files...
if args.cleanup:
for fileName in fileNameList:
csvFileName = """%s.csv""" % (fileName)
os.remove(csvFileName)
if args.verbose>0: print
return 0
##################################
# nite_band_query_results_to_csv_band_files
#
# Almost identical to reqnum_query_results_to_csv_band_files above.
# Uses STILTS for quick conversion of FITS to CSV of input files.
#
def nite_band_query_results_to_csv_band_files(args):
import os
import sys
import datetime
import glob
from astropy.io import fits
stiltsDir='/usrdevel/dp0/dtucker/STILTS3.0/latest'
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'nite_band_query_results_to_csv_band_files'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# This pattern matching will need to be made more general
# (maybe instead pass a list of files instead of trying to
# pattern match?)...
#pattern = """%s.%s.??????-??????.????.objinfo.fits""" % \
#pattern = """%s.%s.??????-??????.????.objinfo*.fits""" % \
# (args.baseName, args.tag)
# The different pattern is basically the only difference
# between nite_band_query_results_to_csv_band_files and
# reqnum_query_results_to_csv_band_files...
pattern = """%s.%s.????????.?.objinfo*.fits""" % \
(args.baseName, args.tag)
fileNameList = sorted(glob.glob(pattern))
if args.verbose>1: print fileNameList
# Convert each FITS file to CSV format via the STILTS tcopy command...
print 'Converting files from FITS to CSV format...'
for fileName in fileNameList:
print fileName
inputFile = fileName
outputFile = """%s.csv""" % (inputFile)
cmd = """%s/stilts -Xmx8000m tcopy in=%s ifmt=fits out=%s ofmt=csv""" % (stiltsDir,inputFile,outputFile)
print datetime.datetime.now()
print """Running: %s""" % (cmd)
status = os.system(cmd)
print datetime.datetime.now()
print
if status !=0:
print """%s failed.""" % (cmd)
return 1
# Open up a CSV output file for each filter band...
bandList = args.bands.strip().split(',')
fout = {}
for band in bandList:
outputFile = """%s.objinfo.%s.csv""" % (args.baseName, band)
fout[band] = open(outputFile,'w')
# Read header from the "zeroth" input file from fileNameList
# (well, actually from its equivalent CSV file)...
csvFileName = """%s.csv""" % (fileNameList[0])
fin = open(csvFileName)
hin = fin.readline()
fin.close()
# Identify which column contains the filter band...
hins=hin.upper().strip().split(',')
try:
bandCol = hins.index('BAND')
except:
print """Could not find 'BAND' in header of %s...""" % (fileNameList[0])
print """Exiting 'reqnum_query_results_to_csv_band_files' method with return code 1 now..."""
return 1
# Write header to each of the output files (one per filter band)...
for band in bandList:
fout[band].write(hin)
# Loop through each of the input files from fileNameList...
invalidBandList = []
for fileName in fileNameList:
csvFileName = """%s.csv""" % (fileName)
print csvFileName
# Open the input file...
with open(csvFileName,'r') as fin:
# Skip header line...
try:
next(fin)
except:
print """%s is empty. Skipping...""" % (csvFileName)
continue
print
linecnt = 0
for lin in fin:
# Increment line count...
linecnt += 1
if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+csvFileName+'): ',linecnt,
sys.stdout.flush()
lins=lin.strip().split(',')
band = lins[bandCol]
if band in bandList:
fout[band].write(lin)
else:
print 'Unrecognized band: '+band
if args.verbose > 1:
print
# Close the output file for each filter band...
for band in bandList:
        fout[band].close()
# If the "--cleanup" option was used, remove the csv versions
# of the original files...
if args.cleanup:
for fileName in fileNameList:
csvFileName = """%s.csv""" % (fileName)
os.remove(csvFileName)
if args.verbose>0: print
return 0
##################################
# add_exp_info
#
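# Join the exposure-level info in <baseName>.expinfo.csv onto each per-band object
# CSV, matching rows on EXPNUM, and write merged <baseName>.<band>.csv files.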
def add_exp_info(args):
import numpy as np
import os
import sys
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'add_exp_info'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Read in expInfoFile...
expInfoFile="""%s.expinfo.csv""" % (args.baseName)
if os.path.isfile(expInfoFile)==False:
print """Exposure info file %s does not exist...""" % (expInfoFile)
print """Exiting addExpInfo method with return code 1"""
return 1
expInfoDict = {}
with open(expInfoFile,'r') as fin:
# Read header line...
try:
hin = fin.readline()
except:
print """%s is empty.""" % (expInfoFile)
print """Exiting 'add_exp_info' method with return code 1 now..."""
return 1
# Identify column in the expInfoFile CSV file corresponding to EXPNUM...
hins=hin.upper().strip().split(',')
try:
expnumCol = hins.index('EXPNUM')
except:
print """Could not find 'EXPNUM' in header of %s...""" % (expInfoFile)
print """Exiting 'add_exp_info' method with return code 1 now..."""
return 1
hin_orig = hin
print
linecnt = 0
for lin in fin:
# Increment line count...
linecnt += 1
if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+expInfoFile+'): ',linecnt,
sys.stdout.flush()
lins=lin.strip().split(',')
expnum = int(lins[expnumCol])
expnumString = str(expnum)
expInfoDict[expnumString] = lin.strip()
if args.verbose > 1:
print
# Loop through each filter band in the official bandList...
bandList = args.bands.strip().split(',')
for band in bandList:
# Check for existence of objInfoFile for this filter band...
objInfoFile = """%s.objinfo.%s.csv""" % (args.baseName, band)
if os.path.isfile(objInfoFile)==False:
print """%s does not exist... Skipping""" % (objInfoFile)
continue
# Open the outputFile for this filter band...
outputFile = """%s.%s.csv""" % (args.baseName, band)
fout = open(outputFile,'w')
# Open the objInfoFile for this filter band...
with open(objInfoFile,'r') as fin:
# Read header line...
try:
hin = fin.readline()
except:
print """%s is empty. Skipping...""" % (objInfoFile)
continue
# Identify column in the objInfoFile CSV file corresponding to EXPNUM...
hins=hin.upper().strip().split(',')
try:
expnumCol = hins.index('EXPNUM')
except:
print """Could not find 'EXPNUM' in header of %s... Skipping""" % (objInfoFile)
continue
# Write header line...
hout = hin_orig.upper().strip()+','+hin.upper().strip()+'\n'
fout.write(hout)
linecnt = 0
for lin in fin:
# Increment line count...
linecnt += 1
if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+objInfoFile+'): ',linecnt,
sys.stdout.flush()
lins=lin.strip().split(',')
expnum = int(lins[expnumCol])
expnumString = str(expnum)
try:
outputLine=expInfoDict[expnumString]
except:
if args.verbose > 2:
print """Expnum %d not in %s. Skipping...""" % (expnum, expInfoFile)
else:
outputLine = outputLine+','+lin.strip()+'\n'
fout.write(outputLine)
if args.verbose > 1:
print
if args.verbose>0: print
return 0
##################################
#
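# Concatenate the per-chunk STILTS internal-match CSV files into a single file,
# offsetting GroupID in each chunk by the largest GroupID seen so far so that
# group identifiers stay unique across the combined output (e.g. if one chunk
# ends at GroupID 83, a GroupID of 5 in the next chunk becomes 88).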
def combine_stilts_internal_match_files(args):
import os
import sys
import glob
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'combine_stilts_internal_match_files'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
fileNamePattern = 'y2a1_rawdata.svy1y2y3.sorted.tmp.g.csv.??.inmatch'
fileNamePattern = args.fileNamePattern
fileNameList = sorted(glob.glob(fileNamePattern))
if args.verbose>2: print fileNameList
# Read header line from the "zeroth" input file from fileNameList
fin = open(fileNameList[0])
hin = fin.readline()
fin.close()
# Identify which column contains the GroupID...
hins=hin.upper().strip().split(',')
try:
groupIDCol = hins.index('GROUPID')
except:
print """Could not find 'GROUPID' in header of %s...""" % (fileNameList[0])
print """Exiting now..."""
return 1
# Open up the output file and write header to it...
outputFileName = args.outputFileName
fout = open(outputFileName,'w')
fout.write(hin)
groupIDOffset = 0
# Loop through each of the input files from fileNameList...
for fileName in fileNameList:
print fileName
# Create an array of newGroupIDs contained within this fileName...
newGroupIDList = []
# Open the input file...
with open(fileName,'r') as fin:
# Skip header line...
try:
next(fin)
except:
print """%s is empty. Skipping...""" % (fileName)
continue
#print
linecnt = 0
for lin in fin:
# Increment line count...
linecnt += 1
if ( (linecnt/10000.0 == int(linecnt/10000.0)) and (args.verbose > 1) ):
print '\r'+'Progress (lines read from '+fileName+'): ',linecnt,
sys.stdout.flush()
# Update GroupID...
lins=lin.strip().split(',')
groupID = int(lins[groupIDCol])
newGroupID = groupID + groupIDOffset
newGroupIDList.append(newGroupID)
lins[groupIDCol] = str(newGroupID)
# Output updated line...
outputLine = ''
for value in lins:
outputLine = outputLine+value+','
outputLine = outputLine[:-1]
outputLine = outputLine+'\n'
fout.write(outputLine)
# After we finish with a given fileName,
# we update the groupIDOffset to be
# the value of the largest newGroupID
# in that fileName...
groupIDOffset = max(newGroupIDList)
print groupIDOffset
if args.verbose > 1:
print
print
# Close the output file...
fout.close()
if args.verbose>0: print
return 0
##################################
#
def reformat_stilts_internal_match_files_for_gcm_exp_old(args):
import os
import sys
import glob
import numpy as np
import pandas as pd
import math
import gcmPythonTools
import datetime
print 'Start of program: ',
print datetime.datetime.now()
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reformat_stilts_internal_match_files_for_gcm_exp'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Add blacklist, y3a1_blacklist, y3a1_graylist (non-photometric exposures),
# and updated qa_summary table to "prune" non-photometric or otherwise
# questionable data from the fit.
fileNamePattern = 'y2a1_rawdata.svy1y2y3.sorted.tmp.g.csv.ac.inmatch.first1000'
#fileNamePattern = 'y2a1_rawdata.svy1y2y3.sorted.tmp.g.csv.??.inmatch'
fileNamePattern = args.fileNamePattern
fileNameList = sorted(glob.glob(fileNamePattern))
if args.verbose>2: print fileNameList
# Open up the output file and write header to it...
outputFileName = args.outputFileName
fout = open(outputFileName,'w')
hdr = gcmPythonTools.gcmInputFileHeader("stdstars")
#if args.verbose > 1:
# print hdr
fout.write(hdr)
groupIDOffset = 0
# Loop through each of the input files from fileNameList...
for fileName in fileNameList:
#print fileName
# Create an array of newGroupIDs contained within this fileName...
newGroupIDList = []
# Open the input file as a pandas dataframe...
print 'Start reading '+fileName+': ',
print datetime.datetime.now()
dataFrame = pd.pandas.read_csv(fileName)
print 'Finished reading '+fileName+': ',
print datetime.datetime.now()
dataFrame['MAG'] = -2.5*np.log10(dataFrame['FLUX_PSF']/dataFrame['EXPTIME'])
dataFrame['MAGERR'] = 1.086*dataFrame['FLUXERR_PSF']/dataFrame['FLUX_PSF']
#print dataFrame
# Update GroupID and groupIDOffset
dataFrame['GroupID'] = dataFrame['GroupID'] + groupIDOffset
groupIDOffset = max(dataFrame['GroupID'])
# Identify unique set of groupIDs...
uniqGroupIDArray = np.sort(dataFrame['GroupID'].unique())
print 'Starting loop through individual groups: ',
print datetime.datetime.now()
for groupID in uniqGroupIDArray:
#if args.verbose > 1:
#print """Working on GroupID %d (max GroupID=%d)""" % (groupID, groupIDOffset)
# Create a temporary data frame just containing
# members of this particular group...
mask = (dataFrame['GroupID'] == groupID)
tmpDataFrame = dataFrame[mask]
# How many members are in this particular group
# (especially after masking for bad expnums, etc.)?
nmem = tmpDataFrame['GroupID'].size
# Loop over all unique pairs in this group, outputting
# info in a format digestible by the Global Calibrations
# Module code GlobalZPSolverDC6.java...
for i in range(0,nmem-1):
regionid1 = tmpDataFrame['EXPNUM_old'].iloc[i]
regionRaCenDeg1 = tmpDataFrame['EXPRA'].iloc[i]
regionDecCenDeg1 = tmpDataFrame['EXPDEC'].iloc[i]
regionQuality1 = 0
starid1 = tmpDataFrame['GLOBJID'].iloc[i]
ximage1 = tmpDataFrame['X_IMAGE'].iloc[i]
yimage1 = tmpDataFrame['Y_IMAGE'].iloc[i]
raDeg1 = tmpDataFrame['RA'].iloc[i]
decDeg1 = tmpDataFrame['DEC'].iloc[i]
mag1 = tmpDataFrame['MAG'].iloc[i]
magErr1 = tmpDataFrame['MAGERR'].iloc[i]
#flux1 = tmpDataFrame['FLUX_PSF'].iloc[i]
#fluxErr1 = tmpDataFrame['FLUXERR_PSF'].iloc[i]
#exptime1 = tmpDataFrame['EXPTIME'].iloc[i]
#if ( (flux1 > 0.) and (exptime1 > 0.) ) :
# mag1 = -2.5*math.log10(flux1/exptime1)
#else:
# mag1 = -9999.
#if ( (flux1 > 0.) and (fluxErr1 >= 0.) ):
# magErr1 = 1.086*fluxErr1/flux1
#else:
# magErr1 = -9999.
for j in range(i+1,nmem):
regionid2 = tmpDataFrame['EXPNUM_old'].iloc[j]
regionRaCenDeg2 = tmpDataFrame['EXPRA'].iloc[j]
regionDecCenDeg2 = tmpDataFrame['EXPDEC'].iloc[j]
regionQuality2 = 0
starid2 = tmpDataFrame['GLOBJID'].iloc[j]
ximage2 = tmpDataFrame['X_IMAGE'].iloc[j]
yimage2 = tmpDataFrame['Y_IMAGE'].iloc[j]
raDeg2 = tmpDataFrame['RA'].iloc[j]
decDeg2 = tmpDataFrame['DEC'].iloc[j]
mag2 = tmpDataFrame['MAG'].iloc[j]
magErr2 = tmpDataFrame['MAGERR'].iloc[j]
#flux2 = tmpDataFrame['FLUX_PSF'].iloc[j]
#fluxErr2 = tmpDataFrame['FLUXERR_PSF'].iloc[j]
#exptime2 = tmpDataFrame['EXPTIME'].iloc[j]
#if ( (flux2 > 0.) and (exptime2 > 0.) ) :
# mag2 = -2.5*math.log10(flux2/exptime2)
#else:
# mag2 = -9999.
#if ( (flux2 > 0.) and (fluxErr2 >= 0.) ):
# magErr2 = 1.086*fluxErr2/flux2
#else:
# magErr2 = -9999.
#try:
# sepArcsec = 3600.*gcmPythonTools.sepDegGet(raDeg1,decDeg1,raDeg2,decDeg2)
#except:
# sepArcsec = -9999.
# Newly added - need to replace:
sepArcsec = 0.00
#if ( mag1 > -99. and magErr1 > 0. and mag2 > -99. and magErr2 > 0\
# and sepArcsec >= 0. and regionid1 != regionid2 ):
outputLine = '%d %f %f %d %d %f %f %f %f %d %f %f %d %d %f %f %f %f %f\n' % \
( regionid1, regionRaCenDeg1, regionDecCenDeg1, regionQuality1, \
starid1, raDeg1, decDeg1, mag1, magErr1,\
regionid2, regionRaCenDeg2, regionDecCenDeg2, regionQuality2, \
starid2, raDeg2, decDeg2, mag2, magErr2,\
sepArcsec)
fout.write(outputLine)
#endif
#endfor_j
#endfor_i
#print groupIDOffset
#endfor_groupID
del dataFrame
#if args.verbose > 1:
# print
# print
#endfor_filename
# Close the output file...
fout.close()
print 'Finished: ',
print datetime.datetime.now()
if args.verbose>0: print
return 0
##################################
#
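# Reformat STILTS internal-match output for the GCM exposure-by-exposure fit:
# magnitudes are computed as -2.5*log10(FLUX_PSF/EXPTIME), rows are grouped by
# GroupID, and every unique pair of detections within a group is written out with
# the exposure (region) and star information of both members plus their separation.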
def reformat_stilts_internal_match_files_for_gcm_exp(args):
import os
import sys
import glob
import numpy as np
import pandas as pd
import math
import gcmPythonTools
import datetime
print 'Start of program: ',
print datetime.datetime.now()
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'reformat_stilts_internal_match_files_for_gcm_exp'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Add blacklist, y3a1_blacklist, y3a1_graylist (non-photometric exposures),
# and updated qa_summary table to "prune" non-photometric or otherwise
# questionable data from the fit.
#fileNamePattern = 'y2a1_rawdata.svy1y2y3.sorted.tmp.g.csv.ac.inmatch.first1000'
fileNamePattern = 'y2a1_rawdata.svy1y2y3.sorted.tmp.g.csv.??.inmatch'
fileNamePattern = args.fileNamePattern
fileNameList = sorted(glob.glob(fileNamePattern))
if args.verbose>2: print fileNameList
# Open up the output file and write header to it...
outputFileName = args.outputFileName
fout = open(outputFileName,'w')
hdr = gcmPythonTools.gcmInputFileHeader("stdstars")
#if args.verbose > 1:
# print hdr
fout.write(hdr)
# Loop through each of the input files from fileNameList...
for fileName in fileNameList:
#print fileName
# Open the input file as a pandas dataframe...
print 'Start reading '+fileName+': ',
print datetime.datetime.now()
dataFrame = pd.pandas.read_csv(fileName)
print 'Finished reading '+fileName+': ',
print datetime.datetime.now()
dataFrame['MAG'] = -2.5*np.log10(dataFrame['FLUX_PSF']/dataFrame['EXPTIME'])
dataFrame['MAGERR'] = 1.086*dataFrame['FLUXERR_PSF']/dataFrame['FLUX_PSF']
maxGroupID = max(dataFrame['GroupID'])
grouped = dataFrame.groupby('GroupID')
print 'Starting loop through individual groups: ',
print datetime.datetime.now()
ntot = 0
for name, group in grouped:
if args.verbose > 1:
if (ntot % 100 == 0):
print datetime.datetime.now(),
print """Working on GroupID %d (max GroupID=%d)""" % (name, maxGroupID)
ntot = ntot + 1
# How many members are in this particular group
# (especially after masking for bad expnums, etc.)?
nmem = group['GroupID'].size
# Loop over all unique pairs in this group, outputting
# info in a format digestible by the Global Calibrations
# Module code GlobalZPSolverDC6.java...
for i in range(0,nmem-1):
regionid1 = group['EXPNUM_old'].iloc[i]
regionRaCenDeg1 = group['EXPRA'].iloc[i]
regionDecCenDeg1 = group['EXPDEC'].iloc[i]
regionQuality1 = 0
starid1 = group['GLOBJID'].iloc[i]
ximage1 = group['X_IMAGE'].iloc[i]
yimage1 = group['Y_IMAGE'].iloc[i]
raDeg1 = group['RA'].iloc[i]
decDeg1 = group['DEC'].iloc[i]
mag1 = group['MAG'].iloc[i]
magErr1 = group['MAGERR'].iloc[i]
for j in range(i+1,nmem):
regionid2 = group['EXPNUM_old'].iloc[j]
regionRaCenDeg2 = group['EXPRA'].iloc[j]
regionDecCenDeg2 = group['EXPDEC'].iloc[j]
regionQuality2 = 0
starid2 = group['GLOBJID'].iloc[j]
ximage2 = group['X_IMAGE'].iloc[j]
yimage2 = group['Y_IMAGE'].iloc[j]
raDeg2 = group['RA'].iloc[j]
decDeg2 = group['DEC'].iloc[j]
mag2 = group['MAG'].iloc[j]
magErr2 = group['MAGERR'].iloc[j]
#try:
# sepArcsec = 3600.*gcmPythonTools.sepDegGet(raDeg1,decDeg1,raDeg2,decDeg2)
#except:
# sepArcsec = -9999.
# Newly added - need to replace:
sepArcsec = 0.00
#if ( mag1 > -99. and magErr1 > 0. and mag2 > -99. and magErr2 > 0\
# and sepArcsec >= 0. and regionid1 != regionid2 ):
outputLine = '%d %f %f %d %d %f %f %f %f %d %f %f %d %d %f %f %f %f %f\n' % \
( regionid1, regionRaCenDeg1, regionDecCenDeg1, regionQuality1, \
starid1, raDeg1, decDeg1, mag1, magErr1,\
regionid2, regionRaCenDeg2, regionDecCenDeg2, regionQuality2, \
starid2, raDeg2, decDeg2, mag2, magErr2,\
sepArcsec)
fout.write(outputLine)
#endif
#endfor_j
#endfor_i
#print groupIDOffset
#endfor_groupID
del dataFrame
#if args.verbose > 1:
# print
# print
#endfor_filename
# Close the output file...
fout.close()
print 'Finished: ',
print datetime.datetime.now()
if args.verbose>0: print
return 0
##################################
#
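# Dump the PROD.BLACKLIST entries whose EXPNUM falls in the requested range to
# <baseName>.blacklist.csv.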
def blacklist_query(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'blacklist_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
select *
from prod.blacklist
where expnum between %d and %d
order by expnum, ccdnum
""" % (args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFile="""%s.blacklist.csv""" % (args.baseName)
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
# PROD.y3a1_blacklist contains the merger of the
# original PROD.blacklist table and Eli Rykoff's
# special, just-for-Y3A1 spread_model-based
# y3a1_blacklist...
def y3a1_blacklist_query(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'y3a1_blacklist_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
select *
from prod.y3a1_blacklist
where expnum between %d and %d
order by expnum, ccdnum
""" % (args.expnumMin, args.expnumMax)
if args.verbose>0: print query
outputFile="""%s.y3a1_blacklist.csv""" % (args.baseName)
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on se_object_query_by_reqnum...
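# Retrieves per-CCD corner and center coordinates of 'red_starflat' images for all
# exposures in the given processing tag and writes them to --outputFileName.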
def image_query(args):
import easyaccess as ea
import numpy as np
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'image_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
connection=ea.connect('desoper')
query="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
tag='%s'
)
select i.pfw_attempt_id, i.band, i.expnum, i.ccdnum,
i.rac1, i.decc1, i.rac2, i.decc2, i.rac3, i.decc3, i.rac4, i.decc4,
i.ra_cent, i.dec_cent, i.racmax, i.racmin, i.crossra0
from prod.image i, x
where x.pfw_attempt_id=i.pfw_attempt_id and
i.filetype='red_starflat' and
i.expnum=x.expnum
order by i.expnum, i.ccdnum
""" % (args.tag)
if args.verbose>0: print query
outputFile = args.outputFileName
connection.query_and_save(query,outputFile)
connection.close()
if args.verbose>0: print
return 0
##################################
#
# Based on se_object_query_by_reqnum...
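# Runs exposure-, image-, and catalog-level queries for the tag, merges them on
# EXPNUM (and CCDNUM), tags each row with EXPNUMCCDNUM = 100*EXPNUM + CCDNUM, and
# drops rows whose EXPNUMCCDNUM appears in PROD.Y3A1_BLACKLIST before writing the
# result to --outputFileName.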
def exp_image_cat_y3a1blacklist_query(args):
import easyaccess as ea
import numpy as np
import pandas as pd
import datetime
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'exp_image_cat_y3a1blacklist_query'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
# Open database connection...
connection=ea.connect('desoper')
# Prepare and run query0...
if args.verbose>0:
print
print "Running query0..."
print datetime.datetime.now()
print
query0="""
select e.expnum, t.unitname, t.reqnum, t.attnum,
e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, e.band,
e.nite, e.mjd_obs, e.field, e.object, e.program,
qa.pfw_attempt_id, qa.source, qa.t_eff, qa.psf_fwhm,
qa.f_eff, qa.skybrightness, qa.b_eff, qa.cloud_apass,
qa.cloud_nomad, qa.n_apass, qa.n_nomad, qa.c_eff,
qa.skytilt, qa.astrom_sigma, qa.astrom_offset,
qa.astrom_ndets, qa.astrom_chi2, qa.nobjects,
qa.flag, qa.calnac
from prod.exposure e, prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
e.expnum=qa.expnum and
t.tag='%s'
""" % (args.tag)
if args.verbose>0: print query0
df0 = connection.query_to_pandas(query0).sort('EXPNUM')
if args.verbose > 0:
print datetime.datetime.now()
if args.verbose > 0:
print
print "Outputting query0 to test_df0.csv..."
print datetime.datetime.now()
df0.to_csv('test_df0.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Prepare and run query1...
if args.verbose>0:
print
print "Running query1..."
print datetime.datetime.now()
print
query1="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
tag='%s'
)
select i.pfw_attempt_id, i.band, i.expnum, i.ccdnum,
i.gaina, i.gainb, i.rdnoisea, i.rdnoiseb,
i.rac1, i.decc1, i.rac2, i.decc2, i.rac3, i.decc3, i.rac4, i.decc4,
i.ra_cent, i.dec_cent, i.racmax, i.racmin, i.crossra0
from prod.image i, x
where x.pfw_attempt_id=i.pfw_attempt_id and
i.filetype='red_starflat' and
i.expnum=x.expnum
""" % (args.tag)
if args.verbose>0: print query1
df1 = connection.query_to_pandas(query1).sort(['EXPNUM','CCDNUM'])
#df1 = pd.read_csv('test_df1.csv')
if args.verbose > 0:
print datetime.datetime.now()
if args.verbose > 0:
print
print "Outputting query1 to test_df1.csv..."
print datetime.datetime.now()
df1.to_csv('test_df1.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Prepare and run query2...
if args.verbose>0:
print
print "Running query2..."
print datetime.datetime.now()
print
query2="""
with x as (
select /*+materialize */ t.pfw_attempt_id, qa.expnum
from prod.proctag t, prod.qa_summary qa
where t.pfw_attempt_id=qa.pfw_attempt_id and
tag='%s'
)
select c.filename, c.band, c.expnum, c.ccdnum, c.objects
from prod.catalog c, x
where x.pfw_attempt_id=c.pfw_attempt_id and
c.filetype='cat_finalcut' and
c.expnum=x.expnum
""" % (args.tag)
if args.verbose>0: print query2
df2 = connection.query_to_pandas(query2).sort(['EXPNUM','CCDNUM'])
if args.verbose > 0:
print datetime.datetime.now()
if args.verbose > 0:
print
print "Outputting query2 to test_df2.csv..."
print datetime.datetime.now()
df2.to_csv('test_df2.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Prepare and run query3...
if args.verbose>0:
print
print "Running query3..."
print datetime.datetime.now()
print
query3="""
select *
from prod.y3a1_blacklist
"""
if args.verbose>0: print query3
df3 = connection.query_to_pandas(query3).sort(['EXPNUM','CCDNUM'])
if args.verbose > 0:
print datetime.datetime.now()
# Add EXPNUMCCDNUM column...
df3.loc[:, 'EXPNUMCCDNUM'] = 100*df3['EXPNUM'] + df3['CCDNUM']
if args.verbose > 0:
print
print "Outputting query3 to test_df3.csv..."
print datetime.datetime.now()
df3.to_csv('test_df3.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Create blacklist array based on EXPNUMCCDNUM column...
blacklistArray = df3['EXPNUMCCDNUM'].values
# Merge queries...
#
# To drop columns with duplicate names, we make use of
# recommendations from an e-mail from S. Allam (27 July 2016)
# and from these two stackoverflow questions:
# http://stackoverflow.com/questions/27313647/merging-two-pandas-dataframes-results-in-duplicate-columns
# and http://stackoverflow.com/questions/19125091/pandas-merge-how-to-avoid-duplicating-columns
# Merge df0 and df1...
if args.verbose > 0:
print
print "Merging data frames df0 and df1..."
print datetime.datetime.now()
df01 = df0.merge(df1, on=['EXPNUM'], suffixes=('', '_y')).sort(['EXPNUM'], ascending=True).reset_index(drop=True)
to_drop = [colname for colname in df01 if colname.endswith('_y')]
df01.drop(to_drop, axis=1, inplace=True)
if args.verbose > 0:
print datetime.datetime.now()
if args.verbose > 0:
print
print "Outputting data frame df01 to test_df01.csv..."
print datetime.datetime.now()
df01.to_csv('test_df01.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Merge df01 and df2...
if args.verbose > 0:
print
print "Merging data frames df01 and df2..."
print datetime.datetime.now()
df012 = df01.merge(df2, on=['EXPNUM','CCDNUM'], suffixes=('', '_y')).sort(['EXPNUM','CCDNUM'], ascending=True).reset_index(drop=True)
to_drop = [colname for colname in df012 if colname.endswith('_y')]
df012.drop(to_drop, axis=1, inplace=True)
if args.verbose > 0:
print datetime.datetime.now()
# Add EXPNUMCCDNUM column...
df012.loc[:, 'EXPNUMCCDNUM'] = 100*df012['EXPNUM'] + df012['CCDNUM']
if args.verbose > 0:
print
print "Outputting data frame df012 to test_df012.csv..."
print datetime.datetime.now()
df012.to_csv('test_df012.csv',index=False,float_format='%.8f')
if args.verbose > 0:
print datetime.datetime.now()
# Create blacklist mask...
# Note that this makes use of the inverse operator ("~")...
mask = ~df012.EXPNUMCCDNUM.isin(blacklistArray)
# Output the mask==True entries...
outputFile = args.outputFileName
if args.verbose > 0:
print
print """Outputting blacklist-cleaned data frame df012 to %s...""" % (outputFile)
print datetime.datetime.now()
df012[mask].to_csv(outputFile, index=False)
# Close database connection...
connection.close()
if args.verbose>0: print
return 0
##################################
if __name__ == "__main__":
main()
##################################
| gpl-3.0 |
ned2/okdata | okregression.py | 1 | 4262 | #!/usr/bin/env python2
from __future__ import division
import sys
import os
import argparse
import numpy
from sklearn import linear_model
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
import okc
TRAIN_PROPORTION = 0.8
text_func = lambda user : user.text
match_func = lambda user : float(user.match)
def argparser():
argparser = argparse.ArgumentParser()
argparser.add_argument("--regressor", default='linear')
argparser.add_argument("--alpha", type=float, default=0.1)
argparser.add_argument("path")
argparser.add_argument("--topfeatures", default=0, type=int)
return argparser
def get_regressor(regressor_type):
return {
'linear' : linear_model.LinearRegression(),
'ridge' : linear_model.Ridge(alpha=ALPHA),
'ridgecv' : linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0]),
'lasso' : linear_model.Lasso(alpha=ALPHA),
'sgd' : linear_model.SGDRegressor(alpha=ALPHA),
'pa' : linear_model.PassiveAggressiveRegressor(),
'bridge' : linear_model.BayesianRidge(),
}[regressor_type]
class Regressor(object):
def __init__(self, instances, labels, regressor_type, tfidf=True):
regressor = self.get_regressor(regressor_type, tfidf)
self.regressor = regressor.fit(instances, labels)
def get_regressor(self, regressor_type, tfidf):
count_vect = CountVectorizer(
analyzer='char_wb',
ngram_range=(1,4),
strip_accents='unicode',
)
pipeline = [('vect', count_vect)]
if tfidf:
pipeline.append(('tfidf', TfidfTransformer()))
pipeline.append(('regressor', get_regressor(regressor_type)))
return Pipeline(pipeline)
def test(self, instances, labels):
predicted = self.regressor.predict(instances)
print "Mean absoulte error: {}".format(metrics.mean_absolute_error(labels, predicted))
print "Mean squared error: {}".format(metrics.mean_squared_error(labels, predicted))
def show_most_informative_features(self, instances, topn):
coefs_with_fns = sorted(zip(self.coefficients, self.features, self.feature_values(instances)))
top = zip(coefs_with_fns[:topn], coefs_with_fns[:-(topn + 1):-1])
for (coef_1, fn_1, freq_1), (coef_2, fn_2, freq_2) in top:
fn_1 = okc.decode_punct(fn_1)
fn_2 = okc.decode_punct(fn_2)
print "{:10.4f}{:>20}{:10} |{:10.4f}{:>20}{:10}".format(coef_1, fn_1, freq_1, coef_2, fn_2, freq_2)
def feature_values(self, instances):
"""So we can get the raw counts of the features as used by the regressor."""
matrix = self.regressor.steps[0][1].fit_transform(instances)
return matrix.sum(axis=0).tolist()[0]
@property
def coefficients(self):
return self.regressor.steps[-1][1].coef_[0]
@property
def features(self):
return self.regressor.steps[0][1].get_feature_names()
def train_test(user_paths, regressor_type, topfeatures=0):
train_user_paths, test_user_paths = train_test_split(
user_paths, train_size=TRAIN_PROPORTION, random_state=42
)
train_instances = okc.load_users(train_user_paths, func=text_func)
train_labels = okc.load_users(train_user_paths, func=match_func)
test_instances = okc.load_users(test_user_paths, func=text_func)
test_labels = okc.load_users(test_user_paths, func=match_func)
print "Training model"
regressor = Regressor(train_instances, list(train_labels), regressor_type)
print "Testing model"
regressor.test(test_instances, list(test_labels))
if topfeatures:
regressor.show_most_informative_features(train_instances, topfeatures)
return regressor
def main():
global ALPHA
args = argparser().parse_args()
ALPHA = args.alpha
paths = [os.path.join(args.path, p) for p in os.listdir(args.path)]
regressor = train_test(paths, args.regressor, topfeatures=args.topfeatures)
if __name__ == "__main__":
sys.exit(main())
| mit |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
MechCoder/scikit-learn | examples/svm/plot_rbf_parameters.py | 20 | 8048 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
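# Illustrative sketch of the "logarithmic grid from 10^-3 to 10^3" mentioned in
# the docstring above; the number of grid points here is an arbitrary choice
# made only for this sketch, not a recommendation from the example itself.
#
#   import numpy as np
#   from sklearn.model_selection import GridSearchCV
#   from sklearn.svm import SVC
#   practical_grid = dict(C=np.logspace(-3, 3, 7), gamma=np.logspace(-3, 3, 7))
#   search = GridSearchCV(SVC(), param_grid=practical_grid, cv=5)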
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
ellonweb/merlin | Arthur/views/graphs.py | 1 | 10037 | # This file is part of Merlin/Arthur.
# Merlin/Arthur is the Copyright (C)2011 of Elliot Rosemarine.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from django.conf.urls.defaults import include, patterns, url
from django.http import HttpResponse, HttpResponseNotFound
from Core.config import Config
from Core.db import session
from Core.maps import Galaxy, GalaxyHistory, Planet, PlanetHistory, Alliance, AllianceHistory
from Arthur.loadable import loadable, load
graphing = Config.get("Misc", "graphing") != "disabled"
caching = Config.get("Misc", "graphing") == "cached"
if graphing:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.ticker import FuncFormatter
urlpatterns = patterns('',
url(r'^graphs/(?P<type>values|ranks)/', include(patterns('Arthur.views.graphs',
url(r'^(?P<x>\d+)[. :\-](?P<y>\d+)[. :\-](?P<z>\d+)', 'planet', name="planetG"),
url(r'^(?P<x>\d+)[. :\-](?P<y>\d+)', 'galaxy', name="galaxyG"),
url(r'^(?P<name>[^/]+)', 'alliance', name="allianceG"),
))),
) if graphing else ()
white = '#ffffff'
black = '#000000'
red = '#ff0000'
green = '#00ff00'
blue = '#0000ff'
yellow = '#ffff00'
magenta = '#ff00ff'
cyan = '#00ffff'
pink = '#ff6666'
bgcolor = '#292D3A'
axcolor = '#373B48'
class graphs(loadable):
_num2short_scale = 1
width = 500
left, right = {'values': yellow, 'ranks': yellow}, {'values': green, 'ranks': green}
plot = {'values' : lambda ax, Q: ((ax[1].plot(Q[0],Q[1],yellow)[0], "Size",),
(ax[2].plot(Q[0],Q[2],green)[0], "Score",),
(ax[2].plot(Q[0],Q[3],magenta)[0], "Value",),
),
'ranks' : lambda ax, Q: ((ax[1].plot(Q[0],Q[1],yellow)[0], "Size",),
(ax[2].plot(Q[0],Q[2],green)[0], "Score",),
(ax[2].plot(Q[0],Q[3],magenta)[0], "Value",),
),
}
ax = {'values' : lambda i, Q: [(0,), Q[1], Q[2]][i],
'ranks' : lambda i, Q: [(0,), Q[1], Q[2]][i],
}
def process_request(self, request):
if request.path_info == "/draw":
if 'REDIRECT_URL' in request.META and request.META['REDIRECT_URL'].startswith("/graphs"):
request.path_info = request.META['REDIRECT_URL']
del request.META['REDIRECT_URL']
def execute(self, request, user, type, x=None, y=None, z=None, name=None):
width = self.width *(8.0/640)
height = width *(6.0/8.0)
fig = plt.figure(figsize=(width,height,), facecolor=bgcolor, edgecolor=bgcolor)
try:
## Set up the axes
fig.subplots_adjust(left=0.08,right=1-0.08,bottom=0.05,top=1-0.075)
ax = {}
ax[0] = fig.add_subplot(111)
ax[0].yaxis.set_visible(False)
ax[0].set_axis_bgcolor(axcolor)
ax[1] = fig.add_axes(ax[0].get_position(True), sharex=ax[0], frameon=False)
ax[1].yaxis.tick_left()
ax[1].yaxis.set_label_position('left')
ax[1].xaxis.set_visible(False)
ax[2] = fig.add_axes(ax[0].get_position(True), sharex=ax[1], frameon=False)
ax[2].yaxis.tick_right()
ax[2].yaxis.set_label_position('right')
ax[2].xaxis.set_visible(False)
## Load the data
o = self.load(x,y,z,name)
if not o:
return self.error(fig,"Unable to load target x:%s y:%s z:%s name:%s"%(x,y,z,name,))
q = self.query[type].filter_by(current=o)
d = zip(*q.all())
## Plot the data and draw a legend
leg = ax[0].legend(*zip(*self.plot[type](ax,d)), loc='upper left',
ncol=len(d)-1, columnspacing=1,
handlelength=0.1, handletextpad=0.5)
leg.get_frame().set_facecolor(black)
leg.get_frame().set_alpha(0.5)
for t in leg.get_texts():
t.set_color(white)
t.set_fontsize(10)
## Sort out the axes
ax[0].tick_params(labelcolor=white)
ax[1].tick_params(labelcolor=self.left[type])
ax[2].tick_params(labelcolor=self.right[type])
if type == "values":
# pretty axis labels
ax[1].yaxis.set_major_formatter(FuncFormatter(lambda x,pos:self.num2short(x)))
ax[2].yaxis.set_major_formatter(FuncFormatter(lambda x,pos:self.num2short(x)))
else:
ax[1].yaxis.set_major_formatter(FuncFormatter(self.rank_axis_format))
ax[2].yaxis.set_major_formatter(FuncFormatter(self.rank_axis_format))
for i in (0,1,2,):
# axis scales
bottom, top = ax[i].get_ylim()
bottom = 0
peak = max(self.ax[type](i,d))
if peak >= top:
top = peak + 1
if type == "values":
# for values, scale all the way down to 0
ax[i].set_ylim(bottom, top)
else:
# for ranks, invert axes, 0 at the top
ax[i].set_ylim(top, bottom)
## Fix some odd behaviour
ax[0].set_xlim(d[0][0], d[0][-1]) #align first tick to left
ax[2].axvline(x=d[0][0], color=black) #fix gfx glitch on left yaxis
## Title
title = self.title(o) + (" Rank" if type == "ranks" else "") + " History"
fig.suptitle(title, color=white, fontsize=18)
return self.render(fig, self.cache(request, type))
finally:
plt.close(fig)
def rank_axis_format(self, x, pos):
if x == 0:
return ""
if int(x) < x:
return ""
return int(x)
def cache(self, request, type):
if not caching:
return ""
path = "Arthur"+request.path_info
dir = os.path.dirname(path)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
return ""
return path
def render(self, fig, path=""):
canvas = FigureCanvas(fig)
try:
if not caching:
raise IOError
with open(path, "wb") as file:
canvas.print_png(file)
except IOError:
pass
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
return response
def error(self, fig, msg):
fig.suptitle(msg, color=white)
return HttpResponseNotFound(self.render(fig), content_type='image/png')
@load
class planet(graphs):
load = staticmethod(lambda x, y, z, name: Planet.load(x,y,z))
title = staticmethod(lambda o: "%s:%s:%s" %(o.x,o.y,o.z,))
query = {'values' : session.query(PlanetHistory.tick, PlanetHistory.size, PlanetHistory.score, PlanetHistory.value),
'ranks' : session.query(PlanetHistory.tick, PlanetHistory.size_rank, PlanetHistory.score_rank, PlanetHistory.value_rank),
}
@load
class galaxy(graphs):
load = staticmethod(lambda x, y, z, name: Galaxy.load(x,y))
title = staticmethod(lambda o: "%s:%s" %(o.x,o.y,))
query = {'values' : session.query(GalaxyHistory.tick, GalaxyHistory.size, GalaxyHistory.score, GalaxyHistory.value),
'ranks' : session.query(GalaxyHistory.tick, GalaxyHistory.size_rank, GalaxyHistory.score_rank, GalaxyHistory.value_rank),
}
@load
class alliance(graphs):
load = staticmethod(lambda x, y, z, name: Alliance.load(name, exact=True))
title = staticmethod(lambda o: "%s" %(o.name,))
left, right = {'values': yellow, 'ranks': cyan}, {'values': green, 'ranks': green}
query = {'values' : session.query(AllianceHistory.tick, AllianceHistory.size, AllianceHistory.score, AllianceHistory.members),
'ranks' : session.query(AllianceHistory.tick, AllianceHistory.size_rank, AllianceHistory.score_rank, AllianceHistory.points_rank),
}
plot = {'values' : lambda ax, Q: ((ax[1].plot(Q[0],Q[1],yellow)[0], "Size",),
(ax[2].plot(Q[0],Q[2],green)[0], "Score",),
(ax[0].plot(Q[0],Q[3],pink)[0], "Members",),
),
'ranks' : lambda ax, Q: ((ax[2].plot(Q[0],Q[1],yellow)[0], "Size",),
(ax[2].plot(Q[0],Q[2],green)[0], "Score",),
(ax[1].plot(Q[0],Q[3],cyan)[0], "Points",),
),
}
ax = {'values' : lambda i, Q: [Q[3], Q[1], Q[2]][i],
'ranks' : lambda i, Q: [(0,), Q[3], Q[2]][i],
}
| gpl-2.0 |
h2oai/h2o-dev | h2o-py/tests/testdir_misc/pyunit_pubdev_5921_na_prints_large.py | 3 | 2405 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import math
def partial_plot_test():
# Import data set that contains NAs
data = h2o.import_file(pyunit_utils.locate("smalldata/airlines/AirlinesTrainWgt.csv"), na_strings=["NA"])
test = h2o.import_file(pyunit_utils.locate("smalldata/airlines/AirlinesTrainWgt.csv"), na_strings=["NA"])
x = data.names
y = "IsDepDelayed"
data[y] = data[y]
x.remove(y)
x.remove("Weight")
x.remove("IsDepDelayed_REC")
WC = "Weight"
# Build a GBM model predicting for response CAPSULE
gbm_model = H2OGradientBoostingEstimator(ntrees=80, learn_rate=0.1, seed=12345)
gbm_model.train(x=x, y=y, training_frame=data)
# pdp with weight and no NA
pdpw = gbm_model.partial_plot(data=test, cols=["Input_miss", "Distance"], server=True, plot=False,
weight_column=WC)
# pdp with weight and NA
pdpwNA = gbm_model.partial_plot(data=test, cols=["Input_miss", "Distance"], server=True, plot=False,
weight_column=WC, include_na = True)
input_miss_list = pyunit_utils.extract_col_value_H2OTwoDimTable(pdpwNA[0], "input_miss")
assert math.isnan(input_miss_list[-1]), "Expected last element to be nan but is not."
distance_list = pyunit_utils.extract_col_value_H2OTwoDimTable(pdpwNA[1], "distance")
assert math.isnan(distance_list[-1]), "Expected last element to be nan but is not."
# compare pdpw with pdpwNA, they should equal upto NA since the pdpw does not have NAs.
pyunit_utils.assert_H2OTwoDimTable_equal_upto(pdpw[0], pdpwNA[0], pdpw[0].col_header, tolerance=1e-10)
pyunit_utils.assert_H2OTwoDimTable_equal_upto(pdpw[1], pdpwNA[1], pdpw[1].col_header, tolerance=1e-10)
# compare pdpwNA with theoretical results
pyunit_utils.compare_weightedStats(gbm_model, test, input_miss_list, "Input_miss",
test[WC].as_data_frame(use_pandas=False, header=False), pdpwNA[0], tol=1e-10)
pyunit_utils.compare_weightedStats(gbm_model, test, distance_list, "Distance",
test[WC].as_data_frame(use_pandas=False, header=False), pdpwNA[1], tol=1e-10)
if __name__ == "__main__":
pyunit_utils.standalone_test(partial_plot_test)
else:
partial_plot_test() | apache-2.0 |
RMKD/networkx | examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
arm-hpc/allinea_json_analysis | PR_JSON_Scripts/pr_plot_mem_use_mpi_bar.py | 1 | 6161 | #!/usr/bin/env python
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import argparse
import json
from pr_json_common import *
from json_dict_common import *
def plot_metrics_as_bar(dataDict, labels, yLabel, threads=False):
"""
Plot metrics on a bar char from the list of metrics supplied, where the
metric values are read from the dictionary supplied
Args:
dataDict (dict): Dictionary of the form {numProcs: [list, of, metrics]}
with data to plot
labels (list): Labels for legends of the data
yLabel (str): Label for the y-axis of the graph
threads (bool): Indicator whether to label the x-axis as scaling of threads or processes
Returns:
Nothing
"""
# Get the xData
xData = range(len(dataDict))
# Get the width of an individual bar
totalBarsWidth = 0.95
barsPerProc = len(list(dataDict.values())[0])
barWidth = float(totalBarsWidth) / barsPerProc
barsPerProc -= 1
# For each of the processes plot a bar
colors = ['r', 'b', 'g', 'k']
sortedKeys = sorted(dataDict.keys())
xInd = 0
for key in sortedKeys:
# For each of the metrics plot a bar
barData = dataDict[key]
ind = 0
barLoc = xData[xInd] - float(barsPerProc) * barWidth / 2
barHandles = []
for barItem in barData:
barHandles.append(plt.bar(barLoc, barItem, width=barWidth, color=colors[ind % len(colors)],
align='center', label=labels[ind]))
barLoc += barWidth
ind += 1
xInd += 1
plt.xticks(xData, sortedKeys)
if (threads):
plt.xlabel("Number of Threads")
else:
plt.xlabel("Number of Processes")
plt.ylabel(yLabel)
plt.legend(handles=barHandles, loc=1, bbox_to_anchor=(1.1, 1.1))
#### End of function plot_metrics_as_bar
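# Illustrative usage sketch for plot_metrics_as_bar: keys are process (or
# thread) counts and each value lists one number per metric/bar; the numbers
# below are hypothetical.
#
#   example_dict = {2: [40.0, 10.0], 4: [40.5, 25.0], 8: [41.0, 55.0]}
#   plot_metrics_as_bar(example_dict, ["Memory Use", "MPI Time"], "Proportion (%)")
#   plt.show()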
def get_mem_use_mpi_percent(fileList, threads=False):
"""
Gets the percentage memory usage per core and the MPI usage reported in the
files that are passed in. It is assumed that the files are JSON representations
of Performance report profiles, and that they are in a series showing
strong scaling from a program
Args:
fileList (list): List of files from which to read JSON Performance Reports data
threads (bool): Indicates whether the number of processes or number of threads should be read
Returns:
Dictionary of the format {numProcs : [memUsage, MPIUsage]}
"""
# Read in the list of files
dataDict = {}
for filename in fileList:
profileDict = {}
# Read the json in from file
with open(filename, 'r') as f:
profileDict = json.load(f)
# Get the total memory per-node
memPerNode = get_mem_per_node(profileDict)
# Get the number of nodes
numNodes = get_num_nodes(profileDict)
# Get the memory used in the application per-process
meanMem = get_dict_field_val(profileDict, ["data", "memory", "mean"])
# Get the number of processes
numProcs = get_num_processes(profileDict)
memPercent = (meanMem * numProcs * 100) / (memPerNode * numNodes)
# Get the percentage time spent in MPI
mpiPercent = get_dict_field_val(profileDict, ["data", "overview",
"mpi", "percent"])
#mpiPercent = float(mpiPercent) * get_runtime(profileDict)
# Get the number of processes or threads used
numProcs = get_num_threads(profileDict) if threads else numProcs
# Update the dictionary of data to plot
dataDict.update({numProcs : [memPercent, mpiPercent]})
return dataDict
#### End of function get_mem_use_mpi_percent
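# Hypothetical example of the dictionary returned above for runs on 2, 4 and 8
# processes (values are [memory %, MPI %]; the numbers are made up):
#
#   {2: [35.1, 8.2], 4: [36.4, 19.7], 8: [38.0, 41.3]}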
def plot_mem_use_mpi_percent_as_bar(fileList, threads=False):
"""
Plots the percentage memory usage per core next to the MPI usage reported
in the files that are passed in. It is assumed that the files are JSON
representations of Performance Report profiles, and that they are in a series
showing strong scaling for a program
Args:
fileList (list): List of files from which to read JSON Performance Reports data
threads (bool): Indicates whether the number of processes or number of threads should be read
Returns:
Nothing
"""
dataDict = get_mem_use_mpi_percent(fileList, threads)
# Plot the metrics
plot_metrics_as_bar(dataDict, ["Memory Use", "MPI Time"], "Proportion (%)", threads)
#### End of function plot_mem_use_mpi_percent_as_bar
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description="Utility to plot a bar chart" +
" of the percentage memory usage vs the percentage of time spent " +
"in MPI calls in a program run.")
# Add a file containing a list of files to read data from
parser.add_argument("infile", help="JSON file to read a list of input files from." +
" It is assumed that the input files are part of a series of runs that " +
"show weak scaling of a program", type=argparse.FileType('r'))
# Add an argument to show if the strong scaling is for threads or processes
parser.add_argument("--threads", help="Indicates whether threads or processes" +
" should used in the scaling analysis", action="store_true",
default=False)
args = parser.parse_args()
# Plot the memory usage and MPI percentage run time from the file passed in
fileList = [line.strip() for line in args.infile.readlines()]
plot_mem_use_mpi_percent_as_bar(fileList, args.threads)
plt.show()
| apache-2.0 |
ivazquez/genetic-variation | src/plot.py | 3 | 30669 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Load external dependencies
from setup import *
# Load internal dependencies
import config, utils
def histogram_binned_data(ax, data, bins=50):
"""
"""
nx, xbins = np.histogram(data, bins=bins, normed=True)
nx_frac = nx/float(len(nx)) # Each bin divided by total number of objects
width = xbins[1] - xbins[0] # Width of each bin
x = np.ravel(zip(xbins[:-1], xbins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
return x, y
def boxplot_custom(bp, ax, colors, hatches):
"""
Custom boxplot style
"""
for i in range(len(bp['boxes'])):
box = bp['boxes'][i]
box.set_linewidth(0)
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
boxPolygon = patches.Polygon(
boxCoords,
facecolor = colors[i % len(colors)],
linewidth=0,
hatch = hatches[i % len(hatches)],
zorder=4
)
ax.add_patch(boxPolygon)
for i in range(0, len(bp['boxes'])):
# Boxes
bp['boxes'][i].set(color=colors[i])
# Whiskers
bp['whiskers'][i*2].set(color=colors[i],
linewidth=1.5,
linestyle='-',
zorder=4)
bp['whiskers'][i*2 + 1].set(color=colors[i],
linewidth=1.5,
linestyle='-',
zorder=4)
# Top and bottom fliers
bp['fliers'][i].set(markerfacecolor=colors[i],
marker='o', alpha=0.75, markersize=3,
markeredgecolor='none', zorder=4)
bp['medians'][i].set(color='black',
linewidth=2,
zorder=5)
# 4 caps to remove
for c in bp['caps']:
c.set_linewidth(0)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(True)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='y', length=0)
def heatmap(x, y, z, ax, title, xlabel, ylabel, xticklabels, yticklabels, cmap='RdBu', hatch='', vmin=0.0, vmax=1.0, show=False, speed='slow', zorder=1):
"""
Inspired by:
- http://stackoverflow.com/a/16124677/395857
- http://stackoverflow.com/a/25074150/395857
"""
# Plot the heatmap
if speed=='slow':
c = ax.pcolor(x, y, z, linewidths=1, cmap=cmap, hatch=hatch, vmin=vmin, vmax=vmax, rasterized=True, zorder=zorder)
# Place the major ticks at the middle of each cell
ax.set_xticks(np.arange(z.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(z.shape[0]) + 0.5, minor=False)
# Set tick labels
ax.set_xticklabels(xticklabels, minor=False, rotation=90)
ax.set_yticklabels(yticklabels, minor=False)
else:
c = ax.pcolormesh(x, y, z, linewidths=1, cmap=cmap, hatch=hatch, vmin=vmin, vmax=vmax, rasterized=True, zorder=zorder)
# Set title and x/y labels
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Remove last blank column
ax.set_xlim(min(x), max(x))
ax.set_ylim(min(y), max(y))
# Turn off all the ticks
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
# Proper orientation (origin at the top left instead of bottom left)
ax.invert_yaxis()
return c
def heatmap_spores(S, ax, title, xlabel, ylabel, xticklabels, yticklabels, fold=False, cmap='RdBu', vmin=0.0, vmax=1.0, radius=0.25):
"""
"""
dict_mat = {u'MATa':{'x':[-radius]*len(S.loc[u'MATa']), 'y':np.arange(0.5,len(S.loc[u'MATa']))},
u'MATα':{'x':np.arange(0.5,len(S.loc[u'MATα'])), 'y':[-radius]*len(S[u'MATα'])}}
for mating in dict_mat.iterkeys():
data = map(list, zip(*[dict_mat[mating]['x'], dict_mat[mating]['y']]))
circles = [plt.Circle([x, y], radius) for (x, y) in data]
# col = patch_collections(circles, edgecolor='black', lw=0.75)
s = S.ix[mating].values
# col.set(array=s, cmap=cmap)
# col.set_clim([vmin, vmax])
# col.set_clip_on(False)
#
# ax.add_collection(col)
return dict_mat
def heatmap_hybrids(H, ax, title, xlabel, ylabel, xticklabels, yticklabels, fold=False, cmap='RdBu', vmin=0.0, vmax=1.0, pad=0.25, legend_title=''):
"""
"""
from matplotlib.ticker import FormatStrFormatter
if fold:
# Get the matrix M and its transpose
X = H.values
Y = H.values.T
# Calculate the element-wise average of the two matrices
Z = np.add(X, Y) / 2.
Z = np.tril(Z) # Get the lower triangle of the matrix
Z = np.ma.masked_array(Z, Z == 0) # Mask the upper triangle
else:
Z = H.values
#
Z = np.ma.array(Z, mask=np.isnan(Z))
cmap.set_bad('0.1',1.)
im = ax.pcolor(Z, edgecolors='lightgrey', linewidths=0.5, cmap=cmap, vmin=vmin, vmax=vmax, rasterized=True)
# Place the major ticks at the middle of each cell
ax.set_xticks(np.arange(Z.shape[1]) + 0.5, minor=False)
ax.set_yticks(np.arange(Z.shape[0]) + 0.5, minor=False)
# Set tick labels
ax.set_xticklabels(xticklabels, minor=False, rotation=90)
ax.set_yticklabels(yticklabels, minor=False)
# Set title and x/y labels
ax.set_title(title, fontsize=6, y=1.15)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Remove last blank column
ax.set_xlim( (0, Z.shape[1]) )
ax.set_ylim( (0, Z.shape[0]) )
# Turn off all the ticks
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
# Proper orientation (origin at the top left instead of bottom left)
ax.invert_yaxis()
# Set equal aspect ratio
ax.set_aspect('equal')
# Add colorbar
cax = inset_axes(ax, width='4%', height='30%', loc=3,
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax.transAxes,
borderpad=0)
cbar = plt.colorbar(im, cax=cax, ticks=[vmin, 0, vmax], format='%.1f')
cbar.ax.set_title(legend_title, horizontalalignment='center', fontsize=6)
cbar.ax.tick_params(labelsize=5)
cbar.locator = ticker.MaxNLocator(nbins=3)
cbar.outline.set_visible(False)
def gw_frequency(data, ax=None, **kwargs):
"""
"""
# Line plots
data.plot(
ax=ax, kind='line',
legend=False, rasterized=True, zorder=2, **kwargs
)
# Draw chromosome shades
chr_coords = utils.chr_coords()
for chrom, g in chr_coords.groupby('chr_arabic'):
ax.axvspan(g.chr_start.squeeze(), g.chr_end.squeeze(),
color=('0.95' if chrom % 2 == 1 else 'w'), lw=0, zorder=0, rasterized=True)
# Axes limits
ax.set_ylim(0, 1)
# Axis tick properties
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=4))
ax.yaxis.set_minor_locator(ticker.MaxNLocator(nbins=20))
ax.yaxis.set_ticks_position('left')
# Axis tick parameters
ax.tick_params(axis='x', which='major', size=0, labelsize=6)
ax.tick_params(axis='y', which='major', size=2, labelsize=6)
ax.tick_params(axis='both', which='minor', size=1, labelsize=4)
# Grid
ax.yaxis.grid(lw=0.6, ls='-', color='0.9', which='major', zorder=1)
# Turn off the x-axis ticks
for tick in ax.xaxis.get_major_ticks():
tick.tick1On = False
tick.tick2On = False
def chr_frequency(data, ax=None, **kwargs):
"""
"""
# Line plots
data.plot(
ax=ax, kind='line',
legend=False, rasterized=True, zorder=2, **kwargs
)
# Axes limits
ax.set_ylim(0, 1)
# Axis tick properties
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=4))
ax.yaxis.set_minor_locator(ticker.MaxNLocator(nbins=20))
ax.yaxis.set_ticks_position('left')
# Axis tick parameters
ax.tick_params(axis='x', which='major', size=0, labelsize=6)
ax.tick_params(axis='y', which='major', size=2, labelsize=6)
ax.tick_params(axis='both', which='minor', size=1, labelsize=4)
# Grid
ax.yaxis.grid(lw=0.6, ls='-', color='0.9', which='major', zorder=1)
# Turn off the x-axis ticks
for tick in ax.xaxis.get_major_ticks():
tick.tick1On = False
tick.tick2On = False
def histogram_frequency(data, ax=None, **kwargs):
"""
"""
# Histogram plots
for time in data:
x, y = histogram_binned_data(ax, data[time], bins=50)
ax.plot(x, y, color=config.time['color'][time], lw=0.5, rasterized=True, zorder=1)
ax.fill_between(x, 0, y, label=time, facecolor=config.time['color'][time], zorder=1, **kwargs)
# Axes limits
ax.set_xlim(0, 1)
ax.set_ylim(bottom=0)
# Axis tick properties
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=2))
ax.xaxis.set_minor_locator(ticker.MaxNLocator(nbins=4))
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, prune='upper'))
ax.yaxis.set_ticks_position('right')
# Axis tick parameters
ax.tick_params(axis='both', which='major', size=2, labelsize=6)
ax.tick_params(axis='both', which='minor', size=1, labelsize=4)
# Grid
ax.xaxis.grid(lw=0.6, ls='-', color='0.9', which='minor', zorder=0)
def loh_length(data, ax=None, **kwargs):
"""
"""
data.plot(ax=ax, logy=True, style='.', marker='o', ms=3, mec='none', legend=False, **kwargs)
utils.simple_axes(ax)
# Axes limits
ax.set_xlim(0,1.1E3)
ax.set_ylim(1E-5,1E-0)
# Axes labels
ax.set_xlabel('Homozygosity segment length (kb)')
ax.set_ylabel('Frequency')
# Legend
ax.legend(frameon=False, loc='upper right',
borderaxespad=0., prop={'size':5},
handlelength=0.75)
def loh_fluctuation(data, ax=None, **kwargs):
"""
"""
data.plot(ax=ax, kind='bar', edgecolor='k', legend=False,
error_kw=dict(ecolor='0.1', lw=.75, capsize=.75, capthick=.75),
**kwargs)
utils.simple_axes(ax)
# Set tick labels
ax.set_xticklabels(data.index.get_level_values('environment'), minor=False, rotation=0)
# Set axes labels
ax.set_xlabel('Environment')
ax.set_ylabel(r'Locus-specific LOH rate (gen$^{-1}$)')
# Set log scale
ax.set_yscale('log')
# Draw legend
ax.legend(frameon=False, loc='upper right',
borderaxespad=0., prop={'size':5},
handlelength=0.75)
def filter_multiindex(data, names=None):
"""
Filter multiindex by level 'type'
"""
indexer = [slice(None)]*len(data.index.names)
indexer[data.index.names.index('type')] = names
return data.loc[tuple(indexer),:].dropna(axis=1, how='all')
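# Illustrative usage, assuming a DataFrame whose row MultiIndex includes a
# 'type' level (e.g. levels 'set', 'type', 'clone'):
#
#   snv_indel_rows = filter_multiindex(df, names=['snv_indel'])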
### Consensus genotype ###
def consensus_genotype(data, ax=None):
"""
Plot the consensus genotype across multiple clones
"""
if len(data) > 0:
x = data.columns.get_level_values('pos_cum').values
y = np.arange(len(data.index))
# Make a color map of fixed colors
cmap = mpl.colors.ListedColormap([config.background['color']['NA/NA'],
config.background['color']['WA/NA'],
config.background['color']['WA/WA']])
bounds = [0,1,2]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
heatmap(np.r_[x, x.max()+1], np.r_[y, y.max()+1], data,
ax, '', '', '', [], [], cmap=cmap, vmin=0, vmax=2)
### SNV/indel mutations ###
def snv_indel_genotype(data, ax=None):
"""
Plot genotype of SNVs/indels
"""
if len(data) > 0:
for ii,(k,g) in enumerate(data.groupby(level='clone')):
g = g.dropna(axis=1)
x = g.columns.get_level_values('pos_cum').values
y = [ii+.5]*len(x)
colors = [config.genotype['color'][int(gt)] for gt in g.values.flatten()]
ax.scatter(x, y, facecolors=colors, edgecolors='k', s=8, rasterized=False, zorder=3)
### Copy number ###
def copy_number(data, ax=None):
"""
Plot copy number aberrations
"""
if len(data.columns) > 0:
print(data)
x = data.columns.get_level_values('pos_cum').values
y = np.arange(len(data.index.get_level_values('clone')))
cmap = mpl.colors.ListedColormap(['none','w','none'])
bounds = [1,2,3]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
for cn, hatch in zip([1, 3], ['xx','--']):
heatmap(np.r_[x, x.max()+1], np.r_[y, y.max()+1], np.ma.masked_array(data, data!=cn),
ax, '', '', '', [], [], cmap=cmap, hatch=hatch, vmin=1, vmax=3, zorder=2)
### LOH ###
def loh_genotype(data, ax=None):
"""
Plot genotype changes due to LOH
"""
if len(data) > 0:
x = data.columns.get_level_values('pos_cum').values
y = np.arange(len(data.index.get_level_values('clone')))
# Make a color map of fixed colors
cmap = mpl.colors.ListedColormap(['k','w','k'])
bounds = [-1,0,1]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
heatmap(np.r_[x, x.max()+1], np.r_[y, y.max()+1], data,
ax, '', '', '', [], [], cmap=cmap, vmin=-1, vmax=1, zorder=1)
def annotate_genotype(data, ax=None):
"""
Annotate mutation genotype
"""
labels = data.columns.get_level_values('gene')
loc = zip(data.columns.get_level_values('pos_cum'), [-.25]*data.shape[1])
for l, xy in zip(labels, loc):
trans = ax.get_xaxis_transform() # x in data units, y in axes fraction
ax.annotate(l, xy=xy, xytext=(0, 4), textcoords='offset points',
arrowprops=dict(arrowstyle='wedge,tail_width=0.7', color='black'),
fontsize=5, style=('italic' if l!='non-coding' else 'normal'),
weight=('bold' if l in ['RNR2','RNR4','FPR1','TOR1'] else 'normal'),
annotation_clip=False, va='bottom', ha='center')
def genome_instability(data, ax=None, title=None):
"""
"""
idx = 0
# Plot tracks
for ii, (s, group) in enumerate(data.groupby(level='set')):
# Consensus genotypes
consensus_data = filter_multiindex(group, names=['consensus'])
nrows = consensus_data.index.get_level_values('clone').nunique()
ax1 = plt.subplot(ax[idx:idx+nrows])
consensus_genotype(consensus_data, ax1)
if ax1.is_first_row():
# Set axis label
labels = ['Consensus']
ax1.set_yticks(np.arange(len(labels)) + 0.5, minor=True)
ax1.set_yticklabels(labels, fontweight='bold', va='center', minor=True)
ax1.set_title(title, fontsize=6, y=2, weight='bold')
# Annotate variants
annotation = filter_multiindex(data, names=['snv_indel'])
annotate_genotype(annotation, ax1)
idx += nrows
# De novo genotypes
de_novo_data = filter_multiindex(group, names=['snv_indel','copy_number','loh'])
labels = de_novo_data.index.get_level_values('clone').unique()
nrows = len(labels)
ax2 = plt.subplot(ax[idx:idx+nrows], sharex=ax1)
# SNV/indel
snv_indel_data = filter_multiindex(group, names=['snv_indel'])
snv_indel_genotype(snv_indel_data, ax2)
# Copy number
copy_number_data = filter_multiindex(group, names=['copy_number'])
copy_number(copy_number_data, ax2)
# LOH
loh_data = filter_multiindex(group, names=['loh'])
loh_genotype(loh_data, ax2)
# Annotate clonal lineages
ax2.set_yticks(np.arange(len(labels)) + 0.5)
ax2.set_yticklabels('C' + labels, fontweight='bold', va='center')
[ax2.axhline(g, lw=0.5, ls="-", color="lightgray") for g in np.arange(len(labels))]
lineage = group.index.get_level_values('lineage').unique()[0]
ax2.tick_params(axis='y', colors=config.lineages[lineage]['fill'], width=5, which='both')
# Show chromosome boundaries
chrom_boundaries(ax2)
idx += nrows
# Set axis label
ax2.set_xlabel('Chromosome')
def scatter_plot(x, y, ax=None, **kwargs):
"""
"""
ax.plot(x, y, linestyle='', rasterized=False, **kwargs)#, label=config.population['long_label'][t])
from matplotlib.ticker import MaxNLocator
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='upper'))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def histogram_x(X, ax=None, time=None):
"""
"""
import gmm
# Fit the Gaussian mixture model
N = np.arange(1, 4)
models = gmm.gmm_fit(X, N)
# Compute the AIC and the BIC
AIC = [m.aic(X) for m in models]
BIC = [m.bic(X) for m in models]
M_best = models[np.argmin(BIC)]
# Plot data
bins = 34
xbins, y = histogram_binned_data(ax, X, bins=bins)
ax.fill_between(xbins, 0, y, label=config.population['long_label'][time],
alpha=0.5, facecolor=config.population['color'][time])
xbins = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
logprob = M_best.score_samples(np.array([xbins]).T)
pdf = np.exp(logprob)
ax.plot(xbins, pdf / bins, '-',
color=config.population['color'][time], lw=1)
# Mean of the distribution
for p in abs(M_best.means_.ravel()):
ax.axvline(x=p, ls='--', lw=1.5, color=config.population['color'][time], zorder=1)
pos = ax.get_ylim()[0] * 0.75 + ax.get_ylim()[1] * 0.25
trans = ax.get_xaxis_transform() # x in data units, y in axes fraction
ax.annotate(np.around(p, 2),
xy=(p, 0.85), xycoords=trans, fontsize=6,
color='k', va='center',
ha=('right' if time=='ancestral' else 'left'),
xytext=((-5 if time=='ancestral' else 5),0), textcoords='offset points',
path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")])
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks(ax.get_yticks()[1:])
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=2))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
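# The `gmm` helper module imported above is not shown in this file; a minimal
# sketch of what `gmm.gmm_fit(X, N)` is assumed to do (fit one mixture per
# candidate component count so the caller can choose by AIC/BIC):
#
#   from sklearn.mixture import GaussianMixture
#   def gmm_fit(X, N):
#       return [GaussianMixture(n_components=n).fit(X) for n in N]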
def histogram_y(Y, ax=None, time=None):
"""
"""
import gmm
# Fit the Gaussian mixture model
N = np.arange(1, 4)
models = gmm.gmm_fit(Y, N)
# Compute the AIC and the BIC
AIC = [m.aic(Y) for m in models]
BIC = [m.bic(Y) for m in models]
M_best = models[np.argmin(BIC)]
# Plot data
bins = 34
ybins, x = histogram_binned_data(ax, Y, bins=bins)
ax.fill_betweenx(ybins, 0, x, label=config.population['long_label'][time],
alpha=0.5, facecolor=config.population['color'][time])
ybins = np.linspace(ax.get_ylim()[0], ax.get_ylim()[1], 1000)
logprob = M_best.score_samples(np.array([ybins]).T)
pdf = np.exp(logprob)
ax.plot(pdf / bins, ybins, '-',
color=config.population['color'][time], lw=1)
# Mean of the distribution
for p in abs(M_best.means_.ravel()):
ax.axhline(y=p, ls='--', lw=1.5, color=config.population['color'][time], zorder=1)
pos = ax.get_xlim()[0] * 0.75 + ax.get_xlim()[1] * 0.25
trans = ax.get_yaxis_transform() # x in data units, y in axes fraction
ax.annotate(np.around(p, 2),
xy=(0.85, p), xycoords=trans, fontsize=6,
color='k', ha='center',
va=('bottom' if time=='ancestral' else 'top'),
xytext=(0,(-10 if time=='ancestral' else 10)), textcoords='offset points',
path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_xticks(ax.get_xticks()[1:])
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=2))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def lollipops(data, ax=None):
    """
    Stem ('lollipop') markers at the aggregated values of `data`.
    Note: marker colours are looked up via a module-level `time` variable.
    """
data = data.agg([np.mean, np.median, np.std, 'count'])
if len(data)>0:
x_data = np.array(data)
y_data = np.repeat([0.2*(ax.get_ylim()[1]-ax.get_ylim()[0])], len(x_data))
arr = zip(x_data, y_data)
markerline, stemlines, baseline = ax.stem(x_data, y_data)
plt.setp(markerline, 'color', config.population['color'][time],
markersize = 2.75, markeredgewidth=.75, markeredgecolor='k', zorder=3)
plt.setp(stemlines, linewidth=.75, color=config.population['color'][time],
path_effects=[path_effects.withStroke(linewidth=1.25, foreground='k')], zorder=2)
plt.setp(baseline, 'color', 'none', zorder=1)
# for idx, label in data.iterrows():
# ax.annotate(label.name[1],
# xy = (label, 0.2), xycoords=('data','axes fraction'),
# xytext = (0, 8), textcoords = 'offset points',
# ha = 'center', va = 'top',
# fontsize = 6, style = 'italic',
# path_effects=[path_effects.withStroke(linewidth=0.5, foreground="w")])
import seaborn as sns
def scatter_rank_correlation(data, ax=None, environment=None):
"""
Scatter plot - Rank correlation
"""
if ax is None:
ax = plt.gca()
# Define colour palettes
colors = [config.population['color'].get(e, 'k') for e in data.columns]
# # Scatter plot
# sns.stripplot(ax=ax, data=data[data['environment']=='YNB'],
# x="population", y="value", hue="group", marker='o', size=7,#marker='marker',
# palette=colors, clip_on=False)
#
# sns.stripplot(ax=ax, data=data[data['environment']==environment],
# x="population", y="value", hue="group", marker='^', size=7,
# palette=colors, clip_on=False)
# # Remove default legend
# ax.legend_.remove()
# Mean expectation
ax.axvline(x=0.0, c='lightgray', ls='--', lw=2)
# Axes limits
ax.set_xlim((-1,1))
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5))
    ax.tick_params(axis='x', which='major', size=2, labelsize=6)
    ax.tick_params(axis='y', which='major', size=0, labelsize=7)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
def chrom_boundaries(ax=None):
"""
Show chromosome boundaries
"""
# Set labels
chr_coords = utils.chr_coords()
ticks = chr_coords.chr_start + (chr_coords.chr_end - chr_coords.chr_start)/2.
labels = chr_coords.chr_roman
ax.set_xticks(ticks)
ax.set_xticklabels(labels)
# Show grid
start = chr_coords.chr_start
grid=[x+1. for x in list(set(start))]
[ax.axvline(g, lw=0.5, ls="-", color="gray") for g in grid]
def set_custom_labels(index, pos):
"""
Custom labels for nested axes.
    index : pandas (Multi)Index whose values are tuples; element `pos` of
        each tuple defines the label groups
    pos : position within each index value used for grouping
"""
start = dict((m[pos], ii) for ii,m in enumerate(index.values))
end = dict((m[pos], len(index)-ii-1) for ii,m in enumerate(index[::-1].values))
labels = dict((key, (start[key] + end.get(key, 0))/2.) for key in end.keys())
return start, end, labels
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_connector=None, prop_patches=None):
"""
"""
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector, BboxConnectorPatch
if prop_connector is None:
prop_connector = prop_lines.copy()
prop_connector["alpha"] = prop_connector.get("alpha", 1)*0.2
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_connector)
p.set_fill(True)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect(ax1, ax2, xmin, xmax, **kwargs):
"""
ax1 : the main axes
    ax2 : the zoomed axes
    (xmin,xmax) : the limits of the colored area in both plot axes.
    connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
    be marked. The keyword parameters will be used to create
patches.
"""
from matplotlib.transforms import Bbox, TransformedBbox, blended_transform_factory
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_connector = kwargs.copy()
prop_connector["edgecolor"] = "none"
prop_connector["facecolor"] = "gray"
prop_connector["alpha"] = 0.4
prop_patches = kwargs.copy()
prop_patches["edgecolor"] = "none"
prop_patches["facecolor"] = "gray"
prop_patches["alpha"] = 0.4
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_connector=prop_connector, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
# ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
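# Illustrative usage of zoom_effect (not part of the original module);
# argument order follows the docstring above (main axes first, zoomed axes
# second), ax_main/ax_zoom are assumed to be user-created axes:
#
#   zoom_effect(ax_main, ax_zoom, xmin=10, xmax=20, lw=0.8)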
def get_text_positions(x_data, y_data, txt_width, txt_height):
"""
"""
a = zip(y_data, x_data)
text_positions = y_data.copy()
for index, (y, x) in enumerate(a):
local_text_positions = [i for i in a if i[0] > (y - txt_height)
and (abs(i[1] - x) < txt_width * 2) and i != (y,x)]
if local_text_positions:
sorted_ltp = sorted(local_text_positions)
if abs(sorted_ltp[0][0] - y) < txt_height: #True == collision
differ = np.diff(sorted_ltp, axis=0)
a[index] = (sorted_ltp[-1][0] + txt_height, a[index][1])
text_positions[index] = sorted_ltp[-1][0] + txt_height
for k, (j, m) in enumerate(differ):
#j is the vertical distance between words
if j > txt_height * 2: #if True then room to fit a word in
a[index] = (sorted_ltp[k][0] + txt_height, a[index][1])
text_positions[index] = sorted_ltp[k][0] + txt_height
break
return text_positions
def text_plotter(x_data, y_data, text_positions, axis,txt_width,txt_height):
"""
"""
for x,y,t in zip(x_data, y_data, text_positions):
axis.text(x - txt_width, 1.01*t, '%d'%int(y),rotation=0, color='blue')
if y != t:
axis.arrow(x, t,0,y-t, color='red',alpha=0.3, width=txt_width*0.1,
head_width=txt_width, head_length=txt_height*0.5,
zorder=0,length_includes_head=True)
def annotate_custom(ax, s, xy_arr=[], *args, **kwargs):
"""
"""
ans = []
an = ax.annotate(s, xy_arr[0], *args, **kwargs)
ans.append(an)
d = {}
try:
d['xycoords'] = kwargs['xycoords']
except KeyError:
pass
try:
d['arrowprops'] = kwargs['arrowprops']
except KeyError:
pass
for xy in xy_arr[1:]:
an = ax.annotate(s, xy, alpha=0.0, xytext=(0,0), textcoords=an, **d)
ans.append(an)
return ans
def colorbar_index(ncolors, cmap):
"""
"""
cmap = colormap_discretize(cmap, ncolors)
mappable = plt.cm.ScalarMappable(cmap=cmap)
mappable.set_array([])
mappable.set_clim(-0.5, ncolors+0.5)
return mappable
def colormap_discretize(cmap, N):
"""
Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
    Example:
        x = resize(arange(100), (5, 100))
        djet = colormap_discretize(cm.jet, 5)
        imshow(x, cmap=djet)
"""
import matplotlib.colors as mcolors
if type(cmap) == str:
cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
        cdict[key] = [(indices[i], colors_rgba[i - 1, ki], colors_rgba[i, ki])
                      for i in range(N + 1)]
# Return colormap object
return mcolors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
def save_figure(filename, formats=['pdf','png','svg'], **kwargs):
"""
Save matplotlib figure in multiple formats (pdf, png, svg).
formats: list of formats.
kwargs: arguments to plt.savefig (e.g. dpi).
"""
if 'pdf' in formats:
plt.savefig(filename+'.pdf', bbox_inches='tight', **kwargs)
if 'png' in formats:
plt.savefig(filename+'.png', bbox_inches='tight', **kwargs)
if 'svg' in formats:
plt.savefig(filename+'.svg', bbox_inches='tight', **kwargs)
| mit |
mbokulic/bmt_parser | bmt_parser/main.py | 1 | 3797 | '''
This is the top-level script for
- parsing the xml using mets and alto files
- disambiguating names from the parsed data
- transforming the data into a suitable format for drawing graphs
Uses logging, logs to "parse.log" in root. It will empty the log at start.
'''
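# Example invocation (illustrative only; the periodical name and file paths
# below are placeholders, not files shipped with this repository):
#
#   python -m bmt_parser.main -name broom \
#       -xml data/periodicals \
#       -d disambiguation.csv \
#       -graph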
import logging
import argparse
import os
import pandas as pd
import bmt_parser.config as cf
import bmt_parser.parse_xml as parse_xml
import bmt_parser.disambiguate_names as disambiguate
import bmt_parser.collaborators as for_graph
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
logger.addHandler(stream_handler)
open('parse.log', 'w').close() # emptying log
file_handler = logging.FileHandler('parse.log')
file_handler.setLevel(logging.WARNING)
logger.addHandler(file_handler)
parser = argparse.ArgumentParser(
description='A script for parsing Blue Mountain periodicals data '
'https://github.com/pulibrary/BlueMountain. You first need to download '
'the data using the provided bash script. Outputs will be stored in paths '
'as provided in config.py')
parser.add_argument('--periodical_name', '-name', required=False, help='name '
'of the periodical you wish to parse. Will be used for '
'naming output')
parser.add_argument('--xml_data_path', '-xml', required=False, help='dir '
'where the Blue Mountain data is located. If not provided,'
' no parsing will be done (a lengthy process, you probably'
' want to skip this after the first time)')
parser.add_argument('--disambiguation_file', '-d', required=False,
help='path to csv with disambiguations, needs to be tab '
'delimited (Blue Mountain provides an Excel file so you '
'will need to convert. If not provided, no disambiguation '
'will be done.')
parser.add_argument('--name_replacements', '-repl', required=False,
help='Optional: path to json that has replacements for the'
'resolved name ("Unique Names") column in the '
'disambiguation file')
parser.add_argument('--tf_for_graph', '-graph', required=False, default=False,
action='store_true', help='Flag whether to transform data '
'for displaying it as a graph. Will try to find data in '
'paths as defined in config.py.')
args = parser.parse_args()
# creating output dir if it does not exist
if not os.path.exists(cf.OUTPUT_DIR):
os.makedirs(cf.OUTPUT_DIR)
# putting periodical name in output paths
if args.periodical_name:
paths = {
key: os.path.join(cf.OUTPUT_DIR,
'_'.join([args.periodical_name, cf.PATHS[key]]))
for key in cf.PATHS.keys()}
else:
paths = {
key: os.path.join(cf.OUTPUT_DIR, cf.PATHS[key])
for key in cf.PATHS.keys()}
# running parser and data transformation code
if args.xml_data_path:
parse_xml.main(args.xml_data_path, paths['data'])
if args.disambiguation_file:
data = disambiguate.main(args.disambiguation_file, paths['data'],
args.name_replacements)
data.to_csv(paths['disamb'], sep=cf.CSV_SEP, index=False)
if args.tf_for_graph:
if not args.disambiguation_file:
if os.path.exists(paths['disamb']):
data = pd.read_csv(paths['disamb'], sep=cf.CSV_SEP)
elif os.path.exists(paths['data']):
data = pd.read_csv(paths['data'], sep=cf.CSV_SEP)
else:
raise ValueError('no data file for this periodical!')
collabs = for_graph.get_collaborators(data)
collabs.to_csv(paths['collabs'], sep=cf.CSV_SEP, index=False)
| mit |
eickenberg/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) to the :ref:`olivetti_faces` dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
uzbit/mlutils | mlutils.py | 1 | 20009 | import os
import random
import numpy as np
import pickle as pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, matthews_corrcoef, make_scorer, roc_curve, auc
from sklearn.model_selection import train_test_split
from .MetaClassifier import MetaClassifier
LOGIT_ACCEPT_RATE = 0.5
SEED = 42
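# Typical flow (illustrative sketch, not part of the original module; X, y,
# X_train, X_test, y_train, y_test are assumed to be user-supplied arrays and
# xgboost is assumed to be installed):
#
#   import xgboost as xgb
#   from mlutils import do_xgboost_hyperopt_search, get_auc
#   best = do_xgboost_hyperopt_search(X, y, cv=3, maxEvals=25)
#   clf = xgb.XGBClassifier(**best).fit(X_train, y_train)
#   print(get_auc(clf, X_test, y_test))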
def plot_hist(y1, y2 = None, binFactor=50.0, title=''):
import matplotlib.pyplot as plt
thisMax = max(y1)
thisMin = min(y1)
if y2 is not None:
max2 = max(y2)
min2 = min(y2)
thisMax = max(thisMax, max2)
thisMin = min(thisMin, min2)
thisWidth = (thisMax - thisMin)/binFactor
try:
plt.hist(y1, alpha = 0.5, bins=np.arange(thisMin, thisMax + thisWidth, thisWidth), label='y1')
if y2 is not None:
plt.hist(y2, alpha = 0.5, bins=np.arange(thisMin, thisMax + thisWidth, thisWidth), label='y2')
except IndexError:
print(title, 'had no values!')
plt.title(title)
plt.legend()
plt.show()
def plot_importance(clf, columns):
import matplotlib.pyplot as plt
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(12, 6))
plt.subplot(1, 1, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
def plot_deviance(clf, X, y, n_estimators):
import matplotlib.pyplot as plt
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
clf.fit(X_train, y_train)
test_score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
#plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(n_estimators) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(n_estimators) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
plt.show()
def get_classification(y, rate=0.5):
return np.array([1 if x else 0 for x in y >= rate])
def get_labelencoder(column_values):
le = LabelEncoder()
le.fit(column_values)
return le
def get_remove_features(df, featureColumns, N=4):
removeList = []
for feat in featureColumns:
vals = df[feat].values
nthtile = np.percentile(vals, np.arange(0, 100, N))
nth0 = nthtile[0]
countDiff = 0
for nth in nthtile:
if nth != nth0:
countDiff += 1
if countDiff == 0:
removeList.append(feat)
return removeList
def transform_column(le, df, column):
df[column] = le.transform(df[column])
def do_evo_search(X, y,
grid={}, scorer=None, cv=3,
population_size=50, mutation_prob=0.3, #crossover_prob=0.5,
generations_number=20, n_jobs=4,
gridpickle='bestParams.pickle'):
print("Performing evolutionary XGBoost search...")
import xgboost as xgb
from evolutionary_search import EvolutionaryAlgorithmSearchCV
from sklearn.pipeline import Pipeline
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, stratify=y, random_state=SEED)
print("Training on ", X_test.shape)
if not grid:
grid = dict()
grid['xgb__learning_rate'] = [0.001, 0.05, 0.1]
grid['xgb__max_depth'] = [3, 5, 10, 20]
grid['xgb__gamma'] = [0, 1, 10]
grid['xgb__subsample'] = [0.75, 1]
grid['xgb__colsample_bytree'] = [0.75, 1]
grid['xgb__min_child_weight'] = [1, 3, 5, 10]
#grid['xgb__base_score'] = [0.1]
grid['xgb__max_delta_step'] = [0, 1, 5]
grid['xgb__n_estimators'] = [200, 500, 1000]
grid['xgb__reg_lambda'] = [1, 10, 100]
grid['xgb__reg_alpha'] = [1, 10, 100]
grid['xgb__silent'] = [1]
grid['xgb__objective'] = ['binary:logistic']
#grid['pca__n_components'] = [50, 100, 200]
if not scorer:
scorer = make_scorer(scorer_auc, greater_is_better=True)
pipeline = Pipeline(steps=[
('xgb', xgb.XGBClassifier())
])
clf = EvolutionaryAlgorithmSearchCV(
pipeline,
grid,
scoring=scorer,
verbose=True,
n_jobs=n_jobs,
cv=cv,
population_size=population_size,
mutation_prob=mutation_prob,
generations_number=generations_number,
)
if gridpickle and os.path.exists(gridpickle):
bestParams = pickle.load(open(gridpickle, 'rb'))
else:
clf.fit(X_test, y_test)
print("Best score", clf.best_score_)
print("Best params", clf.best_params_)
bestParams = {x.split('__')[1]:clf.best_params_[x] for x in clf.best_params_ if x.split('__')[0] == 'xgb'}
pickle.dump(bestParams, open(gridpickle, 'wb'))
print(bestParams)
return bestParams
def do_xgboost_hyperopt_search(X, y, cv=3, maxEvals=10, testSize=0.2, seed=SEED):
if os.path.exists('bestParams.pickle'):
return pickle.load(open('bestParams.pickle', 'rb'))
import xgboost as xgb
from hyperopt import hp
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
print("Performing hyperopt search...")
intChoices = {
'n_estimators': np.arange(300, 10000, dtype=int),
'max_depth': np.arange(3, 100, dtype=int),
}
space = {
'n_estimators' : hp.choice('n_estimators', intChoices['n_estimators']),
'learning_rate' : hp.uniform('learning_rate', 0.0001, 0.01),
'max_depth' : hp.choice('max_depth', intChoices['max_depth']),
'min_child_weight' : hp.uniform('min_child_weight', 0, 20),
'subsample' : hp.uniform('subsample', 0.6, 1),
'gamma' : hp.uniform('gamma', 0.6, 1),
'reg_alpha' : hp.uniform('reg_alpha', 0, 1),
'reg_lambda' : hp.uniform('reg_lambda', 1, 100),
'colsample_bytree' : hp.uniform('colsample_bytree', 0.6, 1),
'objective':'binary:logistic',
'silent' : 1
}
def score(params):
results = list()
print("Testing for ", params)
for i in range(cv):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=testSize, stratify=y, random_state=seed+i
)
print("Train shape", X_train.shape)
clf = xgb.XGBClassifier(**params)
clf.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
early_stopping_rounds = 100,
eval_metric='auc'
)
probs = clf.predict_proba(X_test, ntree_limit=clf.best_iteration)[:,1]
fpr, tpr, _ = roc_curve(y_test, probs, pos_label=1)
results.append(auc(fpr, tpr))
print("Outcomes: ", results)
print("This score:", 1.0-np.mean(results))
print()
return {'loss': 1.0-np.mean(results), 'status': STATUS_OK}
trials = Trials()
bestParams = fmin(score, space,
algo=tpe.suggest,
trials=trials,
max_evals=maxEvals
)
for intChoice in intChoices:
bestParams[intChoice] = intChoices[intChoice][bestParams[intChoice]]
print("Saving the best parameters: ", bestParams)
pickle.dump(bestParams, open('bestParams.pickle', 'wb'))
return bestParams
def do_lnn_hyperopt_search(X, y, cv=3, maxEvals=10, testSize=0.2, seed=SEED):
from hyperopt import hp
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
print("Performing LNN hyperopt search...")
intParams = [
'dense0_num_units',
'dense1_num_units',
'dense2_num_units',
'max_epochs',
]
space = {
'dense0_num_units' : hp.qloguniform('dense0_num_units', np.log(1e3), np.log(1e4), 1), #hp.choice('dense0_num_units', intChoices['dense0_num_units']),
'dense1_num_units' : hp.qloguniform('dense1_num_units', np.log(1e2), np.log(1e3), 1), #hp.choice('dense1_num_units', intChoices['dense1_num_units']),
'dense2_num_units' : hp.qloguniform('dense2_num_units', np.log(1e1), np.log(1e2), 1), #hp.choice('dense2_num_units', intChoices['dense2_num_units']),
'update_learning_rate' : hp.loguniform('update_learning_rate', np.log(1e-4), np.log(1e-1)),
'dropout0_p' : hp.uniform('dropout0_p', 0.1, 0.5),
'dropout1_p' : hp.uniform('dropout1_p', 0.1, 0.5),
'dropout2_p' : hp.uniform('dropout2_p', 0.1, 0.5),
'max_epochs' : hp.qloguniform('max_epochs', np.log(5e1), np.log(1e2), 1), #hp.choice('max_epochs', intChoices['max_epochs']),
'train_split' : hp.uniform('train_split', 0.199999, 0.2),
}
def score(params):
results = list()
print("Testing for ", params)
params['input_shape'] = X.shape[1]
params['output_shape'] = 2
for param in intParams:
params[param] = int(params[param])
for i in range(cv):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=testSize, stratify=y, random_state=seed+i
)
print("Train shape", X_train.shape)
mcObj = MetaClassifier()
mcObj.resetEstimatorList() # why is this fucking necessary?!
mcObj.addLNN(
preproc='scale',
params=params
)
mcObj.fit(X_train, y_train)
results.append(get_auc(mcObj, X_test, y_test))
print("Outcomes: ", results)
print("This score:", 1.0-np.mean(results))
print()
return {'loss': 1.0-np.mean(results), 'status': STATUS_OK}
trials = Trials()
bestParams = fmin(score, space,
algo=tpe.suggest,
trials=trials,
max_evals=maxEvals,
#rseed=None
)
for param in intParams:
bestParams[param] = int(bestParams[param])
bestParams['input_shape'] = X.shape[1]
bestParams['output_shape'] = 2
print("Saving the best parameters: ", bestParams)
pickle.dump(bestParams, open('bestParams_lnn.pickle', 'wb'))
return bestParams
def do_knn_hyperopt_search(X, y, cv=3, maxEvals=10, testSize=0.2, seed=SEED):
from hyperopt import hp
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers.local import LocallyConnected1D
from keras.optimizers import SGD
print("Performing KNN hyperopt search...")
intParams = [
'dense0_num_units',
'dense1_num_units',
'dense2_num_units',
'nb_epoch',
#'batch_size',
]
space = {
'dense0_num_units' : hp.qloguniform('dense0_num_units', np.log(1e3), np.log(1e4), 1), #hp.choice('dense0_num_units', intChoices['dense0_num_units']),
'dense1_num_units' : hp.qloguniform('dense1_num_units', np.log(1e2), np.log(1e3), 1), #hp.choice('dense1_num_units', intChoices['dense1_num_units']),
'dense2_num_units' : hp.qloguniform('dense2_num_units', np.log(1e1), np.log(1e2), 1), #hp.choice('dense2_num_units', intChoices['dense2_num_units']),
'update_learning_rate' : hp.loguniform('update_learning_rate', np.log(1e-4), np.log(1e-1)),
'dropout0_p' : hp.uniform('dropout0_p', 0.1, 0.5),
'dropout1_p' : hp.uniform('dropout1_p', 0.1, 0.5),
'dropout2_p' : hp.uniform('dropout2_p', 0.1, 0.5),
'nb_epoch' : hp.qloguniform('nb_epoch', np.log(5e1), np.log(1e2), 1), #hp.choice('max_epochs', intChoices['max_epochs']),
#'batch_size' : hp.quniform('batch_size', 1, 32, 1),
}
def score(params):
results = list()
print("Testing for ", params)
def build_fn():
model = Sequential()
sgd = SGD(lr=params['update_learning_rate'], decay=1e-6, momentum=0.9, nesterov=True)
model.add(Dense(int(params['dense0_num_units']),
input_dim=params['input_shape'] ,
init='uniform', activation='tanh')
)
model.add(Dropout(params['dropout0_p']))
model.add(Dense(int(params['dense1_num_units']),
init='uniform', activation='tanh')
)
model.add(Dropout(params['dropout1_p']))
model.add(Dense(int(params['dense2_num_units']),
init='uniform', activation='tanh')
)
model.add(Dropout(params['dropout2_p']))
model.add(Dense(int(params['output_shape']),
init='uniform', activation='sigmoid')
)
# Compile model
model.compile(
loss='binary_crossentropy',
optimizer='adagrad', metrics=['accuracy'],
)
return model
params['input_shape'] = X.shape[1]
params['output_shape'] = 1
for param in intParams:
params[param] = int(params[param])
for i in range(cv):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=testSize, stratify=y, random_state=seed+i
)
print("Train shape", X_train.shape)
mcObj = MetaClassifier()
mcObj.resetEstimatorList() # why is this fucking necessary?!
mcObj.addKNN(
preproc='scale',
params={
'build_fn': build_fn,
'nb_epoch': int(params['nb_epoch']),
#'batch_size': int(params['batch_size']),
}
)
mcObj.fit(X_train, y_train)
results.append(get_auc(mcObj, X_test, y_test))
print("Outcomes: ", results)
print("This score:", 1.0-np.mean(results))
print()
return {'loss': 1.0-np.mean(results), 'status': STATUS_OK}
trials = Trials()
bestParams = fmin(score, space,
algo=tpe.suggest,
trials=trials,
max_evals=maxEvals,
)
for param in intParams:
bestParams[param] = int(bestParams[param])
bestParams['input_shape'] = X.shape[1]
bestParams['output_shape'] = 1
print("Saving the best parameters: ", bestParams)
pickle.dump(bestParams, open('bestParams_knn.pickle', 'wb'))
return bestParams
def do_bayes_search(X, y, cv=3, testSize=0.3):
if os.path.exists('bestParams.pickle'):
return pickle.load(open('bestParams.pickle', 'rb'))
print("Performing Bayesian search...")
    from bayes_opt.bayesian_optimization import BayesianOptimization
    import xgboost as xgb  # used below for xgb.DMatrix / xgb.train
    import warnings
    warnings.filterwarnings("ignore")
def xgboostcv(
eta,
max_depth,
num_round,
gamma,
subsample,
max_delta_step,
min_child_weight,
colsample_bytree,
rate_drop,
skip_drop,
reg_alpha,
reg_lambda,
seed=1234,
scorer=None
):
param = {
'eta':eta,
'max_depth':int(round(max_depth)),
'num_round':int(round(num_round)),
'gamma':max(0, gamma),
'subsample':max(0, subsample),
'max_delta_step':max(0, max_delta_step),
'min_child_weight':max(0, min_child_weight),
'colsample_bytree':max(0, colsample_bytree),
#'rate_drop': max(0, rate_drop),
#'skip_drop': max(0, skip_drop),
'reg_alpha': max(0, reg_alpha),
'reg_lambda': max(0, reg_lambda),
'silent':1,
'objective':'binary:logistic',
'nthread':4,
}
results = list()
for i in range(cv):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=testSize, stratify=y, random_state=seed+i
)
xg_train = xgb.DMatrix(X_train, label=y_train)
xg_test = xgb.DMatrix(X_test, label=y_test)
model = xgb.train(
param,
xg_train,
param['num_round'],
evals=[(xg_test, 'test')],
feval=eval_auc,
#early_stopping_rounds=EARLY_STOPPING
)
preds = model.predict(xg_test, ntree_limit=model.best_iteration)
results.append(eval_auc(preds, xg_test)[1])
print("Outcomes: ", results)
return np.mean(results)
xgboostBO = BayesianOptimization(
xgboostcv,
{
'eta': (0.001, 0.5),
#'learning_rate': (0.001, 0.5),
'max_depth': (3, 50),
'num_round': (100, 1000),
'gamma': (0, 100),
'reg_lambda': (0, 1000),
'reg_alpha': (0., 1.),
'subsample': (0.8, 1.0),
'colsample_bytree': (0.8, 1.0),
'max_delta_step': (0, 10),
'min_child_weight': (1, 50),
#'rate_drop': (0., 1.),
#'skip_drop': (0.7, 1.),
}
)
xgboostBO.maximize(init_points=20, restarts=15, n_iter=50)
print('XGBOOST: %f' % xgboostBO.res['max']['max_val'])
bestParams = xgboostBO.res['max']['max_params']
bestParams['max_depth'] = int(round(bestParams['max_depth']))
bestParams['num_round'] = int(round(bestParams['num_round']))
pickle.dump(bestParams, open('bestParams.pickle', 'wb'))
return bestParams
def do_random_search(X, y, nIter=3,
gridpickle='bestParams.pickle'):
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
    import xgboost as xgb  # xgboost is not imported at module level
    clf = xgb.XGBClassifier()
grid = dict()
grid['max_depth'] = sp_randint(3, 15)
grid['learning_rate'] = sp_uniform(loc=0.001, scale=0.1)
grid['n_estimators'] = sp_randint(100, 1500)
grid['silent'] = [True]
grid['objective'] = ['binary:logistic']
grid['gamma'] = sp_randint(1, 100)
grid['min_child_weight'] = sp_randint(0, 20)
grid['max_delta_step'] = sp_randint(0, 10)
grid['subsample'] = sp_uniform(loc=0.7, scale=0.29)
grid['colsample_bytree'] = sp_uniform(loc=0.7, scale=0.29)
grid['reg_alpha'] = sp_uniform(loc=0.0, scale=1.0)
grid['reg_lambda'] = sp_uniform(loc=1, scale=99)
def report(grid_scores):
top_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True)
for i, score in enumerate(top_scores):
print(("Model with rank: {0}".format(i + 1)))
print(("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores))))
print(("Parameters: {0}".format(score.parameters)))
print("")
return top_scores[0]
if gridpickle and os.path.exists(gridpickle):
bestParams = pickle.load(open(gridpickle, 'rb'))
else:
randomSearchCV = RandomizedSearchCV(
clf,
cv=3,
scoring='roc_auc',
param_distributions=grid,
n_iter=nIter,
random_state=SEED,
verbose=100
)
randomSearchCV.fit(X, y)
report(randomSearchCV.grid_scores_)
bestParams = randomSearchCV.best_params_
print(bestParams)
pickle.dump(bestParams, open(gridpickle, 'wb'))
return bestParams
def print_feature_importance(model, cols):
fmap = model.get_fscore()
print("There are %d cols and only %d are used." % (len(cols), len(list(fmap.keys()))))
sortedList = list()
for feat, score in sorted(list(fmap.items()), key=lambda x: x[1], reverse=True):
feat_idx = int(feat[1:])
sortedList.append([feat_idx, fmap[feat], cols[feat_idx]])
print(sortedList[-1])
return sortedList
def print_confusion_matrix(label, preds, labels=None):
cm = confusion_matrix(label, preds, labels=labels)
print("confusion matrix:")
print("label=class0, pred=class0", cm[0][0])
print("label=class1, pred=class1", cm[1][1])
print("label=class0, pred=class1", cm[0][1])
print("label=class1, pred=class0", cm[1][0])
print("Class0 True rate", cm[0][0]/float(cm[0][0]+cm[0][1]))
print("Class1 True rate", cm[1][1]/float(cm[1][1]+cm[1][0]))
print("Class0 False rate", cm[0][1]/float(cm[0][0]+cm[0][1]))
print("Class1 False rate", cm[1][0]/float(cm[1][1]+cm[1][0]))
def get_confusion_rates(label, preds, labels=None):
cm = confusion_matrix(label, preds, labels=labels)
ret = {
"class0true": cm[0][0]/float(cm[0][0]+cm[0][1]),
"class1true": cm[1][1]/float(cm[1][1]+cm[1][0]),
"class0false": cm[0][1]/float(cm[0][0]+cm[0][1]),
"class1false": cm[1][0]/float(cm[1][1]+cm[1][0]),
}
return ret
def get_auc(clf, X_test, y_test):
probs = clf.predict_proba(X_test)[:,1]
fpr, tpr, _ = roc_curve(y_test, probs, pos_label=1)
thisAUC = auc(fpr, tpr)
return thisAUC
def scorer_auc(labels, preds):
fpr, tpr, _ = roc_curve(labels, preds, pos_label=1)
score = auc(fpr, tpr)
return score
def eval_auc(preds, dtrain):
labels = dtrain.get_label()
fpr, tpr, _ = roc_curve(labels, preds, pos_label=1)
score = auc(fpr, tpr)
return 'auc', score
def eval_error(preds, dtrain):
labels = dtrain.get_label()
    return 'error', float(sum(labels != (preds > LOGIT_ACCEPT_RATE))) / len(labels)
def scorer_mcc(labels, preds):
preds = get_classification(preds, rate=LOGIT_ACCEPT_RATE)
coeff = matthews_corrcoef(labels, preds)
return coeff
def eval_mcc(preds, dtrain):
labels = dtrain.get_label()
preds = get_classification(preds, rate=LOGIT_ACCEPT_RATE)
coeff = matthews_corrcoef(labels, preds)
return 'MCC', -coeff
def eval_custom(preds, dtrain):
labels = dtrain.get_label()
preds = get_classification(preds, rate=LOGIT_ACCEPT_RATE)
cm = confusion_matrix(labels, preds)
alpha = 1.0
beta = 1.0
if cm[1][1] > 0 and cm[0][0] > 0:
pos = float(cm[1][0])/cm[1][1]
neg = float(cm[0][1])/cm[0][0]
score = 1. - alpha*pos - beta*neg + pos*neg*alpha*beta
else:
score = -(float(cm[0][1])+float(cm[1][0]))
return 'custom', -score
def eval_custom2(preds, dtrain):
labels = dtrain.get_label()
preds = get_classification(preds, rate=LOGIT_ACCEPT_RATE)
cm = confusion_matrix(labels, preds)
if cm[0][1] > 0 and cm[1][1]+cm[1][0] > 0 and cm[0][0]+cm[0][1] > 0:
tpRate = (cm[1][1]/float(cm[1][1]+cm[1][0]))
fpRate = (cm[0][1]/float(cm[0][0]+cm[0][1]))
score = tpRate/fpRate
else:
score = -(float(cm[0][1])+float(cm[1][0]))
return 'custom2', -score
| apache-2.0 |
ltiao/scikit-learn | sklearn/gaussian_process/kernels.py | 5 | 65974 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
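# Illustrative sketch (not part of the original module): kernels built from
# the classes below can be combined exactly as described in the docstring
# above.  RBF is defined later in this module; X is assumed to be a
# (n_samples, n_features) array.
#
#   from sklearn.gaussian_process.kernels import ConstantKernel, RBF
#   kernel = ConstantKernel(1.0) * RBF(length_scale=1.0) + ConstantKernel(0.1)
#   K = kernel(X)            # kernel matrix k(X, X)
#   print(kernel.theta)      # log-transformed free hyperparameters
#   print(kernel.bounds)     # log-transformed search bounds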
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import inspect
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Entries
-------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __init__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
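# Illustrative sketch (not part of the original module): constructing
# hyperparameter specifications as used by the kernels below.
#
#   h = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
#   # "fixed" bounds exclude the hyperparameter from optimization:
#   h_fixed = Hyperparameter("noise_level", "numeric", "fixed")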
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self' and store remaining arguments in params
args = args[1:]
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
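    # Note (illustrative, not part of the original module): the operators
    # above wrap plain scalars in ConstantKernel, e.g.
    #   2.0 * k   -> Product(ConstantKernel(2.0), k)
    #   k + 1.0   -> Sum(k, ConstantKernel(1.0))
    #   k ** 2    -> Exponentiation(k, 2)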
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i*k_dims:(i+1)*k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
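# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module). A
# WhiteKernel is typically added to another kernel to model i.i.d. noise;
# the noise only appears on the diagonal of k(X, X):
#
#     k = RBF(length_scale=1.0) + WhiteKernel(noise_level=0.5)
#     X = np.random.rand(10, 2)
#     k(X)        # RBF Gram matrix plus 0.5 * identity
#     k(X, X)     # no noise term: WhiteKernel returns zeros when Y is given
#     k.diag(X)   # array filled with 1.0 + 0.5
# ---------------------------------------------------------------------------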
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
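# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module)
# contrasting the isotropic and anisotropic variants of RBF. `theta` exposes
# the log-transformed length scale(s) seen by the optimizer:
#
#     k_iso = RBF(length_scale=1.0)           # one length scale for all features
#     k_ard = RBF(length_scale=[1.0, 2.0])    # one length scale per feature
#     X = np.random.rand(10, 2)
#     k_ard(X).shape      # (10, 10)
#     k_ard.theta         # == np.log([1.0, 2.0])
# ---------------------------------------------------------------------------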
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams (2006), p. 84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0/3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
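# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module). The
# special values nu=0.5, 1.5 and 2.5 use the closed-form expressions above;
# any other nu falls back to the modified Bessel function and is much slower:
#
#     k_exp = Matern(length_scale=1.0, nu=0.5)   # absolute exponential kernel
#     k_m32 = Matern(length_scale=1.0, nu=1.5)   # once differentiable samples
#     k_gen = Matern(length_scale=1.0, nu=0.7)   # general case, ~10x slower
#     K = k_m32(np.random.rand(10, 2))
# ---------------------------------------------------------------------------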
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
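# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module). As
# alpha grows, the RationalQuadratic kernel approaches an RBF kernel with the
# same length scale, which can be checked numerically:
#
#     X = np.random.rand(20, 2)
#     K_rq  = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
#     K_rbf = RBF(length_scale=1.0)(X)
#     np.allclose(K_rq, K_rbf, atol=1e-4)    # expected to be True
# ---------------------------------------------------------------------------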
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 * (sin(pi / periodicity * d(x_i, x_j)) / length_scale) ^ 2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
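# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module). The
# ExpSineSquared kernel models strictly periodic structure; multiplying it by
# an RBF kernel gives the common "locally periodic" kernel:
#
#     k_per = ExpSineSquared(length_scale=1.0, periodicity=3.0)
#     k_loc = k_per * RBF(length_scale=10.0)   # periodic pattern with slow decay
#     K = k_loc(np.linspace(0, 10, 50)[:, None])
# ---------------------------------------------------------------------------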
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, ..., D)
and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
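# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module).
# Exponentiating a DotProduct kernel yields a polynomial-style kernel,
# e.g. (sigma_0^2 + x_i . x_j)^2 for exponent 2; note it is non-stationary:
#
#     k_quad = DotProduct(sigma_0=1.0) ** 2
#     X = np.random.rand(10, 3)
#     K = k_quad(X)
#     k_quad.is_stationary()    # False
# ---------------------------------------------------------------------------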
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is computed. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
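# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch (not part of the original module).
# PairwiseKernel delegates to sklearn.metrics.pairwise.pairwise_kernels, so
# any metric from PAIRWISE_KERNEL_FUNCTIONS can be used; only gamma is treated
# as a tunable hyperparameter and its gradient is approximated numerically:
#
#     k_lap = PairwiseKernel(gamma=1.0, metric="laplacian")
#     X = np.random.rand(10, 2)
#     K = k_lap(X)
#     K, K_grad = k_lap(X, eval_gradient=True)   # K_grad has shape (10, 10, 1)
# ---------------------------------------------------------------------------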
| bsd-3-clause |
stan-dev/math | benchmarks/benchmark.py | 1 | 29437 | #!/usr/bin/python
from __future__ import print_function
import itertools
import numbers
import os
import subprocess
import sys
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
HERE = os.path.dirname(os.path.realpath(__file__))
TEST_FOLDER = os.path.abspath(os.path.join(HERE, "..", "test"))
sys.path.append(TEST_FOLDER)
from sig_utils import *
WORKING_FOLDER = "./benchmarks/"
BENCHMARK_TEMPLATE = """
static void {benchmark_name}(benchmark::State& state) {{
{setup}
for (auto _ : state) {{
{var_conversions}
auto start = std::chrono::high_resolution_clock::now();
{code}
auto end = std::chrono::high_resolution_clock::now();
auto elapsed_seconds =
std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed_seconds.count());
stan::math::recover_memory();
benchmark::ClobberMemory();
}}
}}
BENCHMARK({benchmark_name})->RangeMultiplier({multi})->Range(1, {max_size})->UseManualTime();
"""
CUSTOM_MAIN = """
int main(int argc, char** argv)
{{
stan::math::ChainableStack::instance_->memalloc_.alloc({});
stan::math::recover_memory();
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
}}
"""
overload_scalar = {
"Prim": "double",
"Rev": "stan::math::var",
"Fwd": "stan::math::fvar<double>",
"Mix": "stan::math::fvar<stan::math::var>",
}
def run_command(command):
"""
Runs given command and waits until it finishes executing.
:param command: command to execute
"""
print()
print(" ".join(command))
p1 = subprocess.Popen(command)
if p1.wait() != 0:
raise RuntimeError("command failed: " + " ".join(command))
def build(exe_filepath):
"""
Builds a file using make.
:param exe_filepath: File to build
"""
run_command([make, exe_filepath])
def run_benchmark(exe_filepath, n_repeats=1, csv_out_file=None):
"""
Runs a benchmark
:param exe_filepath: path to the benchmark executable
:param n_repeats: how many times to repeat each benchmark
:param csv_out_file: path to csv file to store benchmark results into
"""
command = [exe_filepath]
if n_repeats > 1:
command.append("--benchmark_repetitions={}".format(n_repeats))
command.append("--benchmark_display_aggregates_only=true")
if csv_out_file is not None:
command.append("--benchmark_out={}".format(csv_out_file))
command.append("--benchmark_out_format=csv")
run_command(command)
def pick_color(n):
str_bit_reversed_n = "{:015b}".format(n + 1)[::-1]
r = 0.9 * ((int(str_bit_reversed_n[0::3], 2) / 2.0 ** 5 + 0.3) % 1)
g = 0.9 * ((int(str_bit_reversed_n[1::3], 2) / 2.0 ** 5 + 0.3) % 1)
b = 0.9 * ((int(str_bit_reversed_n[2::3], 2) / 2.0 ** 5 + 0.3) % 1)
return r, g, b
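# Editor's note (illustrative, not part of the original script): pick_color
# bit-reverses the index before splitting it into R, G and B components, so
# consecutive benchmark lines get well separated colors, e.g.
#
#     colors = [pick_color(i) for i in range(4)]   # distinct RGB tuples in [0, 0.9)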
def plot_results(csv_filename, out_file="", plot_log_y=False):
"""
Plots benchmark results.
:param csv_filename: path to csv file containing results to plot
:param out_file: path to image file to store the figure into. If it equals "window", opens it in an interactive window.
"""
import pandas
import numpy
import matplotlib
if out_file != "window":
matplotlib.use("Agg")
import matplotlib.pyplot as plt
with open(csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
data = pandas.read_csv(f)
name_split = data["name"].str.split("/", expand=True)
timing_data = pandas.concat(
[name_split.iloc[:, :2], data["real_time"]],
axis=1,
).loc[name_split.iloc[:, 2]=="manual_time", :]
timing_data.columns = ["signatures", "sizes", "times"]
timing_data.loc[:, "sizes"] = timing_data["sizes"].astype(int)
timing_data.loc[:, "times"] /= 1000 # convert to microseconds
fig, ax = plt.subplots(figsize=(10, 10))
fig.set_tight_layout(True)
ax.set_xscale("log")
if plot_log_y:
ax.set_yscale("log")
ax.set_xlabel("size")
ax.set_ylabel("time[us]")
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
avg_sig_times = (
sub_data.groupby(by="sizes")["times"]
.median()
.reset_index()
.sort_values(by="sizes")
)
ax.plot(
avg_sig_times["sizes"],
avg_sig_times["times"],
label=signature,
color=pick_color(n),
)
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
ax.plot(
sub_data["sizes"],
sub_data["times"],
"x",
color=pick_color(n),
label="_nolegend_",
scaley=False,
)
[
spine.set_visible(False)
for loc, spine in ax.spines.items()
if loc in ["top", "right", "left", "bottom"]
]
ax.minorticks_off()
ax.grid()
ax.legend()
if out_file == "window":
plt.show()
else:
fig.savefig(out_file, bbox_inches="tight", dpi=300)
def plot_compare(csv_filename, reference_csv_filename, out_file="", plot_log_y=False):
"""
Plots benchmark speedup compared to reference results.
:param csv_filename: path to csv file containing results to plot
:param reference_csv_filename: path to csv file containing reference results to plot
:param out_file: path to image file to store the figure into. If it equals "window", opens it in an interactive window.
"""
import pandas, numpy, matplotlib
if out_file != "window":
matplotlib.use("Agg")
import matplotlib.pyplot as plt
with open(csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
data = pandas.read_csv(f)
with open(reference_csv_filename) as f:
# google benchmark writes some non-csv data at beginning
for line in iter(f.readline, ""):
if line.startswith("name,iterations"):
f.seek(f.tell() - len(line) - 1, os.SEEK_SET)
break
reference_data = pandas.read_csv(f)
name_split = data["name"].str.split("/", expand=True)
timing_data = pandas.concat(
[name_split.iloc[:, :2], data["real_time"]],
axis=1,
).loc[name_split.iloc[:, 2]=="manual_time", :]
reference_name_split = reference_data["name"].str.split("/", expand=True)
reference_timing_data = pandas.concat(
[
reference_name_split.iloc[:, :2],
reference_data["real_time"],
],
axis=1,
).loc[reference_name_split.iloc[:, 2]=="manual_time", :]
timing_data.columns = reference_timing_data.columns = [
"signatures",
"sizes",
"times",
]
same_in_last_selector = reference_timing_data["signatures"].isin(
timing_data["signatures"]
)
reference_timing_data = reference_timing_data.loc[same_in_last_selector, :]
assert (reference_timing_data["signatures"] == timing_data["signatures"]).all()
assert (reference_timing_data["sizes"] == timing_data["sizes"]).all()
timing_data["speedup"] = reference_timing_data["times"] / timing_data["times"]
timing_data["sizes"] = timing_data["sizes"].astype(int)
fig, ax = plt.subplots(figsize=(10, 10))
fig.set_tight_layout(True)
ax.set_xscale("log")
if plot_log_y:
ax.set_yscale("log")
ax.set_xlabel("size")
ax.set_ylabel("speedup")
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
avg_sig_speedups = (
sub_data.groupby(by="sizes")["speedup"]
.median()
.reset_index()
.sort_values(by="sizes")
)
ax.plot(
avg_sig_speedups["sizes"],
avg_sig_speedups["speedup"],
label=signature,
color=pick_color(n),
)
plt.plot([1, max(timing_data["sizes"])], [1, 1], "--", color="gray")
for n, (signature, sub_data) in enumerate(timing_data.groupby("signatures")):
ax.plot(
sub_data["sizes"],
sub_data["speedup"],
"x",
color=pick_color(n),
label="_nolegend_",
scaley=False,
)
[
spine.set_visible(False)
for loc, spine in ax.spines.items()
if loc in ["top", "right", "left", "bottom"]
]
ax.minorticks_off()
ax.grid()
ax.legend()
if out_file == "window":
plt.show()
else:
fig.savefig(out_file, bbox_inches="tight", dpi=300)
def benchmark(
functions_or_sigs,
cpp_filename="benchmark.cpp",
overloads=("Prim", "Rev"),
multiplier_param=None,
max_size_param=None,
max_dim=3,
n_repeats=1,
skip_similar_signatures=False,
csv_out_file=None,
opencl=False,
varmat=False,
):
"""
Generates benchmark code, compiles it and runs the benchmark.
:param functions_or_sigs: List of function names and/or signatures to benchmark
:param cpp_filename: filename of cpp file to use
:param overloads: Which overloads to benchmark
:param multiplier_param: Multiplier by which to increase argument size.
:param max_size_param: Maximum argument size.
:param max_dim: Maximum number of argument dimensions to benchmark. Signatures with any argument with
larger number of dimensions are skipped.
:param n_repeats: Number of times to repeat each benchmark.
:param skip_similar_signatures: Whether to skip similar signatures. Two signatures are similar if they
differ only in similar vector types, which are vector, row_vector and real[].
:param csv_out_file: Filename of the csv file to store benchmark results in.
"""
all_signatures = get_signatures()
functions, signatures = handle_function_list(functions_or_sigs)
functions = set(functions)
signatures = set(signatures)
remaining_functions = set(functions)
parsed_signatures = []
ref_signatures = set()
for signature in all_signatures:
return_type, function_name, stan_args = parse_signature(signature)
reference_args = tuple(reference_vector_argument(i) for i in stan_args)
if (
skip_similar_signatures
and (function_name, reference_args) in ref_signatures
):
continue
if (signature in signatures) or (function_name in functions):
parsed_signatures.append([return_type, function_name, stan_args])
remaining_functions.discard(function_name)
ref_signatures.add((function_name, reference_args))
for signature in signatures:
return_type, function_name, stan_args = parse_signature(signature)
reference_args = tuple(reference_vector_argument(i) for i in stan_args)
if (
skip_similar_signatures
and (function_name, reference_args) in ref_signatures
):
continue
ref_signatures.add((function_name, reference_args))
parsed_signatures.append([return_type, function_name, stan_args])
remaining_functions.discard(function_name)
if remaining_functions:
raise NameError(
"Functions not found: " + ", ".join(sorted(remaining_functions))
)
result = ""
max_args_with_max_dimm = 0
default_max_size = 1024 * 1024 * 16
for return_type, function_name, stan_args in parsed_signatures:
dimm = 0
args_with_max_dimm = 0
for arg in stan_args:
arg_dimm = 0
if "vector" in arg:
arg_dimm = 1
if "matrix" in arg:
arg_dimm = 2
if "[" in arg:
arg_dimm += len(arg.split("]")[0].split("[")[1])
if arg_dimm == dimm:
args_with_max_dimm += 1
elif arg_dimm > dimm:
dimm = arg_dimm
args_with_max_dimm = 1
if dimm > max_dim:
continue
max_args_with_max_dimm = max(max_args_with_max_dimm, args_with_max_dimm)
if max_size_param is None:
if dimm == 0: # signature with only scalar arguments
max_size = 1
else:
max_size = default_max_size
max_size = int(max_size ** (1.0 / dimm))
else:
max_size = max_size_param
if multiplier_param is None:
multiplier = 4
if dimm >= 2:
multiplier = 2
else:
multiplier = multiplier_param
cpp_arg_templates = []
overload_opts = []
for n, stan_arg in enumerate(stan_args):
cpp_arg_template = get_cpp_type(stan_arg)
arg_overload_opts = ["Prim"]
if "SCALAR" in cpp_arg_template and not (
function_name in non_differentiable_args
and n in non_differentiable_args[function_name]
):
arg_overload_opts = overloads
cpp_arg_templates.append(cpp_arg_template)
overload_opts.append(arg_overload_opts)
for arg_overloads in itertools.product(*overload_opts):
# generate one benchmark
benchmark_name = function_name
setup = ""
var_conversions = ""
if opencl in ("copy", "copy_rev") and return_type not in scalar_stan_types:
code = " auto res = stan::math::from_matrix_cl(stan::math::{}(".format(
function_name
)
else:
code = " auto res = stan::math::eval(stan::math::{}(".format(
function_name
)
for (
n,
(arg_overload, cpp_arg_template, stan_arg),
) in enumerate(zip(arg_overloads, cpp_arg_templates, stan_args)):
n_vec, inner_type = parse_array(stan_arg)
if n_vec:
benchmark_name += (
"_" + arg_overload + "_" + inner_type + str(n_vec)
)
else:
benchmark_name += "_" + arg_overload + "_" + stan_arg
scalar = overload_scalar[arg_overload]
arg_type = cpp_arg_template.replace("SCALAR", scalar)
var_name = "arg" + str(n)
make_arg_function = "make_arg"
is_argument_autodiff = "var" in arg_type
is_argument_scalar = stan_arg in scalar_stan_types
value = 0.4
if function_name in special_arg_values:
if isinstance(special_arg_values[function_name][n], str):
make_arg_function = make_special_arg_values[special_arg_values[function_name][n]]
elif isinstance(
special_arg_values[function_name][n], numbers.Number
):
value = special_arg_values[function_name][n]
if not is_argument_autodiff or (
not is_argument_scalar and (
opencl == "base" or varmat == "base" or make_arg_function != "make_arg"
)):
arg_type_prim = cpp_arg_template.replace("SCALAR", "double");
setup += (
" {} {} = stan::test::{}<{}>({}, state.range(0));\n".format(
arg_type_prim,
var_name,
make_arg_function,
arg_type_prim,
value,
)
)
if not is_argument_scalar:
if opencl == "base" or opencl == "copy_rev":
setup += " auto {} = stan::math::to_matrix_cl({});\n".format(
var_name + "_cl", var_name
)
var_name += "_cl"
if is_argument_autodiff:
var_conversions += (
" stan::math::var_value<stan::math::matrix_cl<double>> {}({});\n".format(
var_name + "_var", var_name)
)
var_name += "_var"
elif varmat == "base" and arg_overload == "Rev":
var_conversions += " stan::math::var_value<{}> {}({});\n".format(
arg_type_prim, var_name + "_varmat", var_name
)
var_name += "_varmat"
elif is_argument_autodiff: #rev
var_conversions += " {} {} = {};\n".format(
arg_type, var_name + "_var", var_name
)
var_name += "_var"
else:
var_conversions += (
" {} {} = stan::test::{}<{}>({}, state.range(0));\n".format(
arg_type,
var_name,
make_arg_function,
arg_type,
value,
)
)
if not is_argument_scalar:
if opencl == "base" or (opencl == "copy_rev" and not is_argument_autodiff):
var_conversions += (
" auto {} = stan::math::to_matrix_cl({});\n".format(
var_name + "_cl", var_name
)
)
var_name += "_cl"
elif varmat == "base" and arg_overload == "Rev":
var_conversions += (
" auto {} = stan::math::to_var_value({});\n".format(
var_name + "_varmat", var_name
)
)
var_name += "_varmat"
if (opencl == "copy" or opencl == "copy_rev" and is_argument_autodiff) and not is_argument_scalar:
code += "stan::math::to_matrix_cl({}), ".format(var_name)
elif (
varmat == "copy"
and not is_argument_scalar
and arg_overload == "Rev"
):
code += "stan::math::to_var_value({}), ".format(var_name)
else:
code += var_name + ", "
code = code[:-2] + "));\n"
if "Rev" in arg_overloads:
code += " stan::math::grad();\n"
if opencl == "base":
code += " stan::math::opencl_context.queue().finish();\n"
var_conversions += " stan::math::opencl_context.queue().finish();\n"
result += BENCHMARK_TEMPLATE.format(
benchmark_name=benchmark_name,
setup=setup,
var_conversions=var_conversions,
code=code,
multi=multiplier,
max_size=max_size,
)
cpp_filepath = os.path.join(WORKING_FOLDER, cpp_filename)
with open(cpp_filepath, "w") as f:
f.write("#include <benchmark/benchmark.h>\n")
f.write("#include <test/expressions/expression_test_helpers.hpp>\n\n")
f.write(result)
if "Rev" in overloads:
# estimate the amount of arena memory the benchmarks will need
DOUBLE_SIZE = 8
N_ARRAYS = 4 # vals, adjoints, pointers + 1 for anything else
f.write(
CUSTOM_MAIN.format(
(max_size_param or default_max_size)
* DOUBLE_SIZE
* N_ARRAYS
* (max_args_with_max_dimm + 1)
)
)
else:
f.write("BENCHMARK_MAIN();")
exe_filepath = cpp_filepath.replace(".cpp", exe_extension)
build(exe_filepath)
run_benchmark(exe_filepath, n_repeats, csv_out_file)
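# Editor's note (illustrative, not part of the original script): benchmark()
# can also be driven programmatically. The signature string below is a
# hypothetical example; real ones come from sig_utils.get_signatures():
#
#     benchmark(["add(matrix, matrix) => matrix"],
#               overloads=("Prim", "Rev"), n_repeats=3,
#               csv_out_file="add_benchmark.csv")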
def main(
functions_or_sigs,
cpp_filename="benchmark.cpp",
overloads=("Prim", "Rev"),
multiplier_param=None,
max_size_param=None,
max_dim=3,
n_repeats=1,
skip_similar_signatures=False,
csv_out_file=None,
opencl=False,
varmat=False,
plot=False,
plot_log_y=False,
plot_speedup=False,
plot_reference=None,
):
"""
Generates benchmark code, compiles it and runs the benchmark. Optionally plots the results.
:param functions_or_sigs: List of function names and/or signatures to benchmark
:param cpp_filename: filename of cpp file to use
:param overloads: Which overloads to benchmark
:param multiplier_param: Multiplier by which to increase argument size.
:param max_size_param: Maximum argument size.
:param max_dim: Maximum number of argument dimensions to benchmark. Signatures with any argument with
larger number of dimensions are skipped.
:param n_repeats: Number of times to repeat each benchmark.
:param skip_similar_signatures: Whether to skip similar signatures. Two signatures are similar if they
differ only in similar vector types, which are vector, row_vector and real[].
:param csv_out_file: Filename of the csv file to store benchmark results in.
:param plot: Filename of the image file to store the plot into. If it equals 'window', opens a window with the graph.
:param plot_log_y: Use logarithmic y axis for plotting
:param plot_speedup: plot speedup of OpenCL or varmat overloads compared to CPU ones
"""
if plot and csv_out_file is None:
csv_out_file = ".benchmark.csv"
if plot_speedup and (opencl or varmat):
if opencl:
special = "_cl"
else:
special = "_varmat"
opencl_csv_out_file = csv_out_file + special
if "." in csv_out_file:
base, ext = csv_out_file.rsplit(".", 1)
opencl_csv_out_file = base + special + "." + ext
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
csv_out_file,
False,
False,
)
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
opencl_csv_out_file,
opencl,
varmat,
)
plot_compare(opencl_csv_out_file, csv_out_file, plot)
else:
benchmark(
functions_or_sigs,
cpp_filename,
overloads,
multiplier_param,
max_size_param,
max_dim,
n_repeats,
skip_similar_signatures,
csv_out_file,
opencl,
varmat,
)
if plot_reference:
plot_compare(csv_out_file, plot_reference, plot, plot_log_y)
elif plot:
plot_results(csv_out_file, plot, plot_log_y)
class FullErrorMsgParser(ArgumentParser):
"""
Modified ArgumentParser that prints full error message on any error.
"""
def error(self, message):
sys.stderr.write("error: %s\n" % message)
self.print_help()
sys.exit(2)
def processCLIArgs():
"""
Define and process the command line interface to the benchmark.py script.
"""
parser = FullErrorMsgParser(
description="Generate and run_command benchmarks.",
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"functions",
nargs="+",
type=str,
default=[],
help="Signatures and/or function names to benchmark.",
)
parser.add_argument(
"--overloads",
nargs="+",
type=str,
default=["Prim", "Rev"],
help="Which overload combinations to benchmark. Possible values: Prim, Rev, Fwd, Mix. Defaults to Prim and Rev.",
)
parser.add_argument(
"--multiplier",
type=int,
default=None,
help="Multiplyer, by which to increase argument size. Defaults to 4 for functions with "
"1-dimensional arguments and 2 for other functions.",
)
parser.add_argument(
"--max_size",
type=int,
default=None,
help="Maximum argument size. Defaults to (16000000)**(1/dimm), where dimm is the largest "
"number of dimensions of arguments.",
)
parser.add_argument(
"--max_dim",
type=int,
default=3,
help="Maximum number of argument dimensions to benchmark. Signatures with any argument with "
"larger number of dimensions are skipped.",
)
parser.add_argument(
"--cpp",
metavar="filename",
type=str,
default="benchmark.cpp",
help="Filename of the cpp file to generate.",
)
parser.add_argument(
"--repeats",
metavar="N",
type=int,
default=1,
help="Number of times to repeat each benchmark.",
)
parser.add_argument(
"--csv",
metavar="filename",
type=str,
default=None,
help="Filename of the csv file to store benchmark results in. By default does not store results.",
)
parser.add_argument(
"--plot",
metavar="filename",
type=str,
default=False,
help="Filename store plotted graph into. If filename equals to 'window', opens a window with the graph."
" Plotting requires matplotlib and pandas libraries. Default: no plotting.",
)
parser.add_argument(
"--plot_log_y",
default=False,
action="store_true",
help="Use logarithmic y axis when plotting.",
)
parser.add_argument(
"--opencl",
metavar="setting",
type=str,
default=False,
help="Benchmark OpenCL overloads. Possible values: "
"base - benchmark just the execution time, "
"copy - include argument copying time"
"copy_rev - include argument copying time for var arguments only",
)
parser.add_argument(
"--varmat",
metavar="setting",
type=str,
default=False,
help="Benchmark varmat overloads. Possible values: "
"base - benchmark just the execution time, "
"copy - include argument copying time",
)
parser.add_argument(
"--plot_speedup",
default=False,
action="store_true",
help="Plots speedup of OpenCL or varmat overloads compared to Eigen matvar ones. Can only be specified together "
"with both --plot and either --opencl or --varmat. Cannot be specified together with --plot_reference.",
)
parser.add_argument(
"--plot_reference",
metavar="filename",
type=str,
default=None,
help="Specify filename of reference run csv output. Plots speedup of this run compared to the reference. "
"Reference run must have all parameters the same as this one, except possibly --opencl, output files and "
"plotting parameters. Can only be specified together with --plot. Cannot be specified together with "
"--plot_cl_speedup.",
)
parser.add_argument(
"--skip_similar_signatures",
default=False,
action="store_true",
help="Skip similar signatures. Two signatures are similar if they"
"difffer only in similar vector types, which are vector, row_vector and real[].",
)
args = parser.parse_args()
assert not (args.opencl and args.varmat), ValueError(
"--opencl and --varmat cannot be specified at the same time!"
)
if args.plot_reference or args.plot_speedup or args.plot_log_y:
assert args.plot, ValueError(
"--plot is required if you specify any of --plot_reference, --plot_speedup, --plot_log_y!"
)
main(
functions_or_sigs=args.functions,
cpp_filename=args.cpp,
overloads=args.overloads,
multiplier_param=args.multiplier,
max_size_param=args.max_size,
max_dim=args.max_dim,
csv_out_file=args.csv,
n_repeats=args.repeats,
skip_similar_signatures=args.skip_similar_signatures,
plot=args.plot,
plot_log_y=args.plot_log_y,
opencl=args.opencl,
plot_speedup=args.plot_speedup,
plot_reference=args.plot_reference,
varmat=args.varmat,
)
if __name__ == "__main__":
processCLIArgs()
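# Editor's note (illustrative, not part of the original script): typical
# command-line invocations; the function names are hypothetical examples:
#
#     python ./benchmarks/benchmark.py add multiply --repeats 3 \
#         --csv results.csv --plot results.png
#     python ./benchmarks/benchmark.py add --opencl base --plot_speedup \
#         --plot window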
| bsd-3-clause |
antiface/mne-python | examples/preprocessing/plot_define_target_events.py | 19 | 3350 | """
============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put face stimuli presented into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
###############################################################################
# Find stimulus event followed by quick button presses
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
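# Editor's note (illustrative sketch, not part of the original example): one
# simple way to use the lag as a parametric regressor is to keep only valid
# trials and standardize it before feeding it to a regression on the epochs:
# valid = lag != fill_na
# lag_z = (lag[valid] - lag[valid].mean()) / lag[valid].std()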
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
###############################################################################
# View evoked response
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
plt.clf()
ax = plt.subplot(2, 1, 1)
early.plot(axes=ax)
plt.title(title % 'early')
plt.ylabel('Evoked field (fT)')
ax = plt.subplot(2, 1, 2)
late.plot(axes=ax)
plt.title(title % 'late')
plt.ylabel('Evoked field (fT)')
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/datasets/svmlight_format.py | 30 | 18814 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False,
offset=0, length=-1):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains pairwise preference constraints (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
n_features is only required if ``offset`` or ``length`` are passed a
non-default value.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no ``offset`` or ``length`` is passed.
If ``offset`` or ``length`` are passed, the "auto" mode falls back
to ``zero_based=True`` to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : boolean, default False
If True, will return the query_id array for each file.
offset : integer, optional, default 0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : integer, optional, default -1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id : array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id, offset, length))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id,
offset=0, length=-1):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id,
offset, length)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id,
offset, length)
# convert from array.array, give data the right dtype
if not multilabel:
labels = np.frombuffer(labels, np.float64)
data = np.frombuffer(data, actual_dtype)
indices = np.frombuffer(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = np.frombuffer(query, np.int64)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False,
offset=0, length=-1):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number of
features.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features : int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no offset or length is passed.
If offset or length are passed, the "auto" mode falls back
to zero_based=True to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : boolean, defaults to False
If True, will return the query_id array for each file.
offset : integer, optional, default 0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : integer, optional, default -1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
if (offset != 0 or length > 0) and zero_based == "auto":
# disable heuristic search to avoid getting inconsistent results on
# different segments of the file
zero_based = True
if (offset != 0 or length > 0) and n_features is None:
raise ValueError(
"n_features is required when offset or length is specified.")
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id),
offset=offset, length=length)
for f in files]
if (zero_based is False or
zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0
for tmp in r)):
for _, indices, _, _, _ in r:
indices -= 1
n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
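# Illustrative sketch (not part of the original module): loading a training
# and a test file together keeps the number of columns consistent, as noted
# in the docstring above. The file names below are hypothetical.
def _example_load_train_and_test(train_path="svmlight_train.txt",
                                 test_path="svmlight_test.txt"):
    X_train, y_train, X_test, y_test = load_svmlight_files(
        (train_path, test_path))
    # Both sparse matrices now share the same second dimension.
    assert X_train.shape[1] == X_test.shape[1]
    return X_train, y_train, X_test, y_test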
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
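# Illustrative sketch (not part of the original module): a minimal round trip
# through dump_svmlight_file and load_svmlight_file using an in-memory buffer.
def _example_round_trip():
    X = np.array([[0., 1.5], [2., 0.]])
    y = np.array([0., 1.])
    buf = io.BytesIO()
    dump_svmlight_file(X, y, buf, zero_based=True)
    buf.seek(0)
    # n_features is passed explicitly so the loaded shape matches X exactly.
    X_loaded, y_loaded = load_svmlight_file(buf, n_features=2, zero_based=True)
    return X_loaded.toarray(), y_loaded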
| mit |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
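# Illustrative sketch (not part of the original module): mapping an array of
# scalars to RGBA values with ScalarMappable, as described in its docstring.
def _example_scalar_mapping():
    sm = ScalarMappable(norm=colors.Normalize(vmin=0., vmax=1.),
                        cmap=get_cmap('jet'))
    values = np.linspace(0., 1., 5)
    return sm.to_rgba(values)  # (5, 4) array of RGBA floats in [0, 1]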
| gpl-3.0 |
jamesrp/pyeq2 | Examples/GUI/devExamples/wxMatplotlibExample.py | 2 | 1093 |
# based on http://stackoverflow.com/questions/10737459/embedding-a-matplotlib-figure-inside-a-wxpython-panel
from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
#from matplotlib.backends.backend_wx import NavigationToolbar2Wx # not used in this example
from matplotlib.figure import Figure
import wx
class CanvasPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
def draw(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
if __name__ == "__main__":
app = wx.App()
fr = wx.Frame(None, title='test')
panel = CanvasPanel(fr)
panel.draw()
fr.Show()
app.MainLoop()
| bsd-2-clause |
rvraghav93/scikit-learn | sklearn/mixture/tests/test_bayesian_mixture.py | 84 | 17929 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import gammaln
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm
from sklearn.mixture.bayesian_mixture import _log_wishart_norm
from sklearn.mixture import BayesianGaussianMixture
from sklearn.mixture.tests.test_gaussian_mixture import RandomData
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_greater_equal, ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
PRIOR_TYPE = ['dirichlet_process', 'dirichlet_distribution']
def test_log_dirichlet_norm():
rng = np.random.RandomState(0)
weight_concentration = rng.rand(2)
expected_norm = (gammaln(np.sum(weight_concentration)) -
np.sum(gammaln(weight_concentration)))
    predicted_norm = _log_dirichlet_norm(weight_concentration)
    assert_almost_equal(expected_norm, predicted_norm)
def test_log_wishart_norm():
rng = np.random.RandomState(0)
n_components, n_features = 5, 2
degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.
log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components))
expected_norm = np.empty(5)
for k, (degrees_of_freedom_k, log_det_k) in enumerate(
zip(degrees_of_freedom, log_det_precisions_chol)):
expected_norm[k] = -(
degrees_of_freedom_k * (log_det_k + .5 * n_features * np.log(2.)) +
np.sum(gammaln(.5 * (degrees_of_freedom_k -
np.arange(0, n_features)[:, np.newaxis])), 0))
    predicted_norm = _log_wishart_norm(degrees_of_freedom,
                                       log_det_precisions_chol, n_features)
    assert_almost_equal(expected_norm, predicted_norm)
def test_bayesian_mixture_covariance_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
covariance_type = 'bad_covariance_type'
bgmm = BayesianGaussianMixture(covariance_type=covariance_type,
random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type, bgmm.fit, X)
def test_bayesian_mixture_weight_concentration_prior_type():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
bad_prior_type = 'bad_prior_type'
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=bad_prior_type, random_state=rng)
assert_raise_message(ValueError,
"Invalid value for 'weight_concentration_prior_type':"
" %s 'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% bad_prior_type, bgmm.fit, X)
def test_bayesian_mixture_weights_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 5, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of weight_concentration_prior
bad_weight_concentration_prior_ = 0.
bgmm = BayesianGaussianMixture(
weight_concentration_prior=bad_weight_concentration_prior_,
random_state=0)
assert_raise_message(ValueError,
"The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% bad_weight_concentration_prior_,
bgmm.fit, X)
# Check correct init for a given value of weight_concentration_prior
weight_concentration_prior = rng.rand()
bgmm = BayesianGaussianMixture(
weight_concentration_prior=weight_concentration_prior,
random_state=rng).fit(X)
assert_almost_equal(weight_concentration_prior,
bgmm.weight_concentration_prior_)
# Check correct init for the default value of weight_concentration_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(1. / n_components, bgmm.weight_concentration_prior_)
def test_bayesian_mixture_means_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 3, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of mean_precision_prior
bad_mean_precision_prior_ = 0.
bgmm = BayesianGaussianMixture(
mean_precision_prior=bad_mean_precision_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% bad_mean_precision_prior_,
bgmm.fit, X)
# Check correct init for a given value of mean_precision_prior
mean_precision_prior = rng.rand()
bgmm = BayesianGaussianMixture(
mean_precision_prior=mean_precision_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_)
# Check correct init for the default value of mean_precision_prior
bgmm = BayesianGaussianMixture(random_state=rng).fit(X)
assert_almost_equal(1., bgmm.mean_precision_prior_)
# Check raise message for a bad shape of mean_prior
mean_prior = rng.rand(n_features + 1)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
bgmm.fit, X)
# Check correct init for a given value of mean_prior
mean_prior = rng.rand(n_features)
bgmm = BayesianGaussianMixture(n_components=n_components,
mean_prior=mean_prior,
random_state=rng).fit(X)
assert_almost_equal(mean_prior, bgmm.mean_prior_)
    # Check correct init for the default value of mean_prior
bgmm = BayesianGaussianMixture(n_components=n_components,
random_state=rng).fit(X)
assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_)
def test_bayesian_mixture_precisions_prior_initialisation():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Check raise message for a bad value of degrees_of_freedom_prior
bad_degrees_of_freedom_prior_ = n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=bad_degrees_of_freedom_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'degrees_of_freedom_prior' should be "
"greater than %d, but got %.3f."
% (n_features - 1, bad_degrees_of_freedom_prior_),
bgmm.fit, X)
# Check correct init for a given value of degrees_of_freedom_prior
degrees_of_freedom_prior = rng.rand() + n_features - 1.
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior,
bgmm.degrees_of_freedom_prior_)
# Check correct init for the default value of degrees_of_freedom_prior
degrees_of_freedom_prior_default = n_features
bgmm = BayesianGaussianMixture(
degrees_of_freedom_prior=degrees_of_freedom_prior_default,
random_state=rng).fit(X)
assert_almost_equal(degrees_of_freedom_prior_default,
bgmm.degrees_of_freedom_prior_)
# Check correct init for a given value of covariance_prior
covariance_prior = {
'full': np.cov(X.T, bias=1) + 10,
'tied': np.cov(X.T, bias=1) + 5,
'diag': np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3,
'spherical': rng.rand()}
bgmm = BayesianGaussianMixture(random_state=rng)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.covariance_prior = covariance_prior[cov_type]
bgmm.fit(X)
assert_almost_equal(covariance_prior[cov_type],
bgmm.covariance_prior_)
# Check raise message for a bad spherical value of covariance_prior
bad_covariance_prior_ = -1.
bgmm = BayesianGaussianMixture(covariance_type='spherical',
covariance_prior=bad_covariance_prior_,
random_state=rng)
assert_raise_message(ValueError,
"The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% bad_covariance_prior_,
bgmm.fit, X)
# Check correct init for the default value of covariance_prior
covariance_prior_default = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()}
bgmm = BayesianGaussianMixture(random_state=0)
for cov_type in ['full', 'tied', 'diag', 'spherical']:
bgmm.covariance_type = cov_type
bgmm.fit(X)
assert_almost_equal(covariance_prior_default[cov_type],
bgmm.covariance_prior_)
def test_bayesian_mixture_check_is_fitted():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
# Check raise message
bgmm = BayesianGaussianMixture(random_state=rng)
X = rng.rand(n_samples, n_features)
assert_raise_message(ValueError,
'This BayesianGaussianMixture instance is not '
'fitted yet.', bgmm.score, X)
def test_bayesian_mixture_weights():
rng = np.random.RandomState(0)
n_samples, n_features = 10, 2
X = rng.rand(n_samples, n_features)
# Case Dirichlet distribution for the weight concentration prior type
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=3, random_state=rng).fit(X)
expected_weights = (bgmm.weight_concentration_ /
np.sum(bgmm.weight_concentration_))
assert_almost_equal(expected_weights, bgmm.weights_)
assert_almost_equal(np.sum(bgmm.weights_), 1.0)
# Case Dirichlet process for the weight concentration prior type
dpgmm = BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=3, random_state=rng).fit(X)
weight_dirichlet_sum = (dpgmm.weight_concentration_[0] +
dpgmm.weight_concentration_[1])
tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum
expected_weights = (dpgmm.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
expected_weights /= np.sum(expected_weights)
assert_almost_equal(expected_weights, dpgmm.weights_)
assert_almost_equal(np.sum(dpgmm.weights_), 1.0)
@ignore_warnings(category=ConvergenceWarning)
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=20)
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type=covar_type,
warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
current_lower_bound = -np.infty
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_lower_bound = current_lower_bound
current_lower_bound = bgmm.fit(X).lower_bound_
assert_greater_equal(current_lower_bound, prev_lower_bound)
if bgmm.converged_:
break
assert(bgmm.converged_)
def test_compare_covar_type():
# We can compare the 'full' precision with the other cov_type if we apply
# 1 iter of the M-step (done during _initialize_parameters).
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
X = rand_data.X['full']
n_components = rand_data.n_components
for prior_type in PRIOR_TYPE:
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='full',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
full_covariances = (
bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis])
# Check tied_covariance = mean(full_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='tied',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(tied_covariance, np.mean(full_covariances, 0))
# Check diag_covariance = diag(full_covariances)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='diag',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
diag_covariances = (bgmm.covariances_ *
bgmm.degrees_of_freedom_[:, np.newaxis])
assert_almost_equal(diag_covariances,
np.array([np.diag(cov)
for cov in full_covariances]))
# Check spherical_covariance = np.mean(diag_covariances, 0)
bgmm = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=2 * n_components, covariance_type='spherical',
max_iter=1, random_state=0, tol=1e-7)
bgmm._check_initial_parameters(X)
bgmm._initialize_parameters(X, np.random.RandomState(0))
spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_
assert_almost_equal(
spherical_covariances, np.mean(diag_covariances, 1))
@ignore_warnings(category=ConvergenceWarning)
def test_check_covariance_precision():
# We check that the dot product of the covariance and the precision
# matrices is identity.
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components, n_features = 2 * rand_data.n_components, 2
# Computation of the full_covariance
bgmm = BayesianGaussianMixture(n_components=n_components,
max_iter=100, random_state=rng, tol=1e-3,
reg_covar=0)
for covar_type in COVARIANCE_TYPE:
bgmm.covariance_type = covar_type
bgmm.fit(rand_data.X[covar_type])
if covar_type == 'full':
for covar, precision in zip(bgmm.covariances_, bgmm.precisions_):
assert_almost_equal(np.dot(covar, precision),
np.eye(n_features))
elif covar_type == 'tied':
assert_almost_equal(np.dot(bgmm.covariances_, bgmm.precisions_),
np.eye(n_features))
elif covar_type == 'diag':
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones((n_components, n_features)))
else:
assert_almost_equal(bgmm.covariances_ * bgmm.precisions_,
np.ones(n_components))
@ignore_warnings(category=ConvergenceWarning)
def test_invariant_translation():
    # We check here that adding a constant to the data correctly changes the
    # parameters of the mixture
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=100)
n_components = 2 * rand_data.n_components
for prior_type in PRIOR_TYPE:
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
bgmm1 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X)
bgmm2 = BayesianGaussianMixture(
weight_concentration_prior_type=prior_type,
n_components=n_components, max_iter=100, random_state=0,
tol=1e-3, reg_covar=0).fit(X + 100)
assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)
assert_almost_equal(bgmm1.weights_, bgmm2.weights_)
assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)
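# Illustrative sketch (not part of the original test module): a minimal fit of
# BayesianGaussianMixture on random data, mirroring the set-up used above.
def _example_fit_bayesian_mixture():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 2)
    bgmm = BayesianGaussianMixture(n_components=3, covariance_type='full',
                                   max_iter=100, random_state=0).fit(X)
    # The mixture weights always sum to one, as checked in the tests above.
    return bgmm.weights_, bgmm.means_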
| bsd-3-clause |
mhue/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
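# Illustrative sketch (not part of the original test module): a side-by-side
# comparison of ordinary least squares and Theil-Sen on the 1d toy problem
# with outliers that is generated above.
def _example_robust_fit():
    X, y, w, c = gen_toy_problem_1d()
    lstq = LinearRegression().fit(X, y)
    theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
    # Theil-Sen stays close to the true slope w = 3 despite the outliers,
    # while least squares is pulled away from it.
    return lstq.coef_, theil_sen.coef_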
| bsd-3-clause |
willkara/HealthAnalytics | 4_data_collection/Simulations/Correlated_Gen4_100/Stat_Aggregator.py | 4 | 3240 | import pandas as pd
from math import floor
from math import ceil
state_abb = [ 'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL', \
'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', \
'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', \
'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'PR', \
'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', \
'WI', 'WY']
gender = []
ethni = []
# Aggregating the Gender Percentages in the DataFrame
repeat = '_SampleData.csv'
print "Doing Aggregation for Gender"
for state in range(len(state_abb)):
filename = state_abb[ state ] + repeat
df = pd.read_csv(filename)
df.pop('Unnamed: 0')
total_value = float(len(df.index))
unique = ['M', 'F']
tmpList = []
for value in unique:
count_value = float(df[df['Gender'] == value]['Gender'].count())
tmpList.append( count_value / total_value )
gender.append(tmpList)
df_out = pd.DataFrame(gender, index=state_abb)
df_out.columns = unique
df_out.to_csv('Gender_Summary.csv')
print "Doing Aggregation for Ethnicity"
for state in range(len(state_abb)):
filename = state_abb[ state ] + repeat
df = pd.read_csv(filename)
df.pop('Unnamed: 0')
total_value = float(len(df.index))
unique = ['WhiteNonHisp', 'Hisp', 'Black', 'Asian', 'NavAm', 'PacIs', 'Mixed', 'Other']
tmpList = []
for value in unique:
count_value = float(df[df['Ethnicity'] == value]['Ethnicity'].count())
tmpList.append( count_value / total_value )
ethni.append(tmpList)
df_out = pd.DataFrame(ethni, index=state_abb)
df_out.columns = unique
df_out.to_csv('Ethnicity_Summary.csv')
predictor = ['Age_years', 'Height', 'Waist', 'Weight', 'BMI', 'WtHR', 'HR', \
'Avg_Sys', 'Avg_Dia', 'Tri', 'LDL', 'HDL']
for key in range(len(predictor)):
continuous = []
print "Doing Aggregation for ", predictor[ key ]
curr_prd = predictor[ key ]
for state in range(len(state_abb)):
filename = state_abb[ state ] + repeat
df = pd.read_csv(filename)
df.pop('Unnamed: 0')
total_value = float(len(df.index))
min_value = floor(df[ curr_prd ].min())
max_value = ceil(df[ curr_prd ].max())
average = df[ curr_prd ].mean()
std = df[ curr_prd ].std()
tmpList = []
tmpList.append(average)
tmpList.append(std)
tmpList.append(min_value)
tmpList.append(max_value)
slope = (max_value - min_value) / 20
bins = [(min_value + (slope * float(i))) for i in range(21)]
for j in range(20):
count_value = float(df[(df[ curr_prd ] >= bins[ j ]) \
& (df[ curr_prd ] < bins[ j + 1 ])][ curr_prd ].count())
tmpList.append( count_value / total_value )
continuous.append(tmpList)
df_out = pd.DataFrame(continuous, index=state_abb)
columns = ['Avg', 'Std', 'Min', 'Max', 'Bin1', 'Bin2', 'Bin3', 'Bin4', 'Bin5', \
'Bin6', 'Bin7', 'Bin8', 'Bin9', 'Bin10', 'Bin11', 'Bin12', 'Bin13', \
'Bin14', 'Bin15', 'Bin16', 'Bin17', 'Bin18', 'Bin19', 'Bin20']
df_out.columns = columns
if predictor[ key ] == 'Age_years':
df_out.to_csv('Age_Summary.csv')
elif predictor[ key ] == 'Avg_Sys':
df_out.to_csv('Sys_Summary.csv')
elif predictor[ key ] == 'Avg_Dia':
df_out.to_csv('Dia_Summary.csv')
else:
df_out.to_csv(predictor[ key ] + '_Summary.csv')
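# Illustrative sketch (not part of the original script): the 20 equal-width
# bins built in the loop above, expressed as a standalone helper. ``series``
# is assumed to be one of the pandas columns processed above.
def _bin_fractions(series, n_bins=20):
    low, high = floor(series.min()), ceil(series.max())
    width = (high - low) / n_bins
    edges = [low + width * i for i in range(n_bins + 1)]
    total = float(len(series))
    # Fraction of rows falling in each half-open bin [edge_j, edge_j+1).
    return [((series >= edges[j]) & (series < edges[j + 1])).sum() / total
            for j in range(n_bins)]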
| mit |
AustereCuriosity/astropy | astropy/nddata/ccddata.py | 1 | 21267 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .compat import NDDataArray
from .nduncertainty import StdDevUncertainty, NDUncertainty
from ..io import fits, registry
from .. import units as u
from .. import log
from ..wcs import WCS
from ..utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarly disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = ("See `astropy.nddata.NDArithmeticMixin.{}`."
"".format(func.__name__))
return sharedmethod(inner)
return decorator
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, meta data, units, and WCS information for a
single CCD image.
Parameters
-----------
data : `~astropy.nddata.CCDData`-like or `numpy.ndarray`-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, `numpy.ndarray` or \
None, optional
Uncertainties on the data.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will causes the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
If the unit is ``None`` or not otherwise specified it will raise a
``ValueError``
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
``Classmethod`` to create an CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super(CCDData, self).__init__(*args, **kwd)
        # Check if a unit is set. This can be temporarily disabled by the
        # _arithmetic decorator defined above.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
if self.uncertainty.__class__.__name__ != 'StdDevUncertainty':
raise ValueError('only StdDevUncertainty can be saved.')
            # Assuming uncertainty is a StdDevUncertainty, save just the array;
            # this might be problematic if the uncertainty has a unit differing
            # from the data, so abort for different units. This is important
            # for astropy > 1.2
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None and
self.uncertainty.unit != self.unit):
                raise ValueError('saving uncertainties with a unit differing '
                                 'from the data unit is not supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta['HIERARCH {0}'.format(key.upper())] = (
short_name, "Shortened name for {}".format(key))
self.meta[short_name] = value
else:
self.meta[key] = value
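# --- Editor-added illustration (not part of the original module) ---
# A minimal sketch of the key-shortening convention implemented just above,
# using a plain dict in place of ``self.meta``; the keyword and value are
# made up.
def _example_fits_safe_key_shortening():
    meta = {}
    key, value = 'detector_serial_number', 'x' * 80
    short_name = key[:8]
    meta['HIERARCH {0}'.format(key.upper())] = (
        short_name, "Shortened name for {}".format(key))
    meta[short_name] = value
    return meta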
# This needs to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object. This may generate a warning, but never
# an error.
wcs = WCS(hdr)
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None, **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, optional
        FITS extension from which CCDData should be initialized. If zero and
        no data in the primary extension, it will search for the first
        extension with data. The header will be added to the primary header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and a unit is
        also present in the FITS header (via the ``BUNIT`` keyword), this
        argument takes precedence and is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = 'unsupported keyword: {0}.'.format(key)
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
uncertainty = StdDevUncertainty(hdus[hdu_uncertainty].data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if hdus.fileinfo(i)['datSpan'] > 0:
hdu = i
hdr = hdr + hdus[hdu].header
log.info("first HDU with data is extension "
"{0}.".format(hdu))
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if unit is not None and fits_unit_string:
log.info("using the unit {0} passed to the FITS reader instead of "
"the unit {1} in the FITS file.".format(unit,
fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
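# --- Editor-added illustration (not part of the original module) ---
# A possible call to the reader defined above; the file name is hypothetical
# and the explicit ``unit`` assumes the file carries no BUNIT card.
def _example_read_ccddata():
    return fits_ccddata_reader('example_image.fits', hdu=0, unit='adu')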
def fits_ccddata_writer(ccd_data, filename, hdu_mask='MASK',
hdu_uncertainty='UNCERT', hdu_flags=None, **kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
    ------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a different unit than
          ``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
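# --- Editor-added illustration (not part of the original module) ---
# A possible call to the writer defined above; the CCDData instance and the
# output file name are hypothetical.
def _example_write_ccddata(ccd):
    fits_ccddata_writer(ccd, 'example_output.fits', hdu_mask='MASK',
                        hdu_uncertainty='UNCERT')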
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
try:
CCDData.read.__doc__ = fits_ccddata_reader.__doc__
except AttributeError:
CCDData.read.__func__.__doc__ = fits_ccddata_reader.__doc__
try:
CCDData.write.__doc__ = fits_ccddata_writer.__doc__
except AttributeError:
CCDData.write.__func__.__doc__ = fits_ccddata_writer.__doc__
| bsd-3-clause |
hjweide/rpi-datathon-2015 | utils.py | 1 | 2426 | #!/usr/bin/env python
import cPickle as pickle
import pandas as pd
import re
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from os import getcwd
from os.path import exists, join
# remove markup, stopwords, etc. from tweets
def tweet_to_words(tweet, min_length):
tweet_text = BeautifulSoup(tweet).get_text()
letters_only = re.sub('[^a-zA-Z]', ' ', tweet_text)
# convert to lowercase and get rid of all extra whitespace
words = letters_only.lower().split()
# it is faster to search a set than a list
stops = set(stopwords.words('english'))
meaningful_words = [w for w in words if w not in stops]
if len(meaningful_words) >= min_length:
return ' '.join(meaningful_words)
return None
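# --- Editor-added illustration (not part of the original script) ---
# What tweet_to_words() does to a made-up raw tweet; assumes the NLTK
# stopwords corpus has been downloaded.
def example_clean_tweet():
    raw = "<b>Loving</b> the new #stadium, see you there at 7!!!"
    # markup, non-letters and stopwords removed -> 'loving new stadium see'
    return tweet_to_words(raw, min_length=2)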
# read tweets from the csv file
def read_tweets(datafile, filename, min_length=0):
# check if we have already saved this file to disk to save computation
if not exists(filename):
# some lines are bad, just skip them (we have enough tweets already!)
data = pd.read_csv(datafile, header=0, delimiter=',', quotechar='"', error_bad_lines=False, encoding='utf-8-sig')
num_tweets = data['ItemID'].size
print('cleaned tweets will be saved to %s' % (filename))
clean_tweets, clean_tweets_sentiment = [], []
for i in xrange(num_tweets):
if (i + 1) % 1000 == 0:
print(' cleaning tweet %d of %d' % (i + 1, num_tweets))
clean_tweet = tweet_to_words(data['SentimentText'][i], min_length)
if clean_tweet is not None:
clean_tweet_sentiment = data['Sentiment'][i]
clean_tweets.append(clean_tweet)
clean_tweets_sentiment.append(clean_tweet_sentiment)
# save the cleaned tweets to disk for future use
with open(filename, 'wb') as ofile:
pickle.dump((clean_tweets, clean_tweets_sentiment), ofile)
else:
print('loading cleaned tweets from disk: %s' % (filename))
with open(filename, 'rb') as ifile:
clean_tweets, clean_tweets_sentiment = pickle.load(ifile)
return clean_tweets, clean_tweets_sentiment
# example usage
if __name__ == '__main__':
root = getcwd()
tweetsfile = join(root, 'data', 'tweets.csv')
tweetsfile_clean = join(root, 'data', 'tweets_clean.pickle')
clean_tweets, clean_tweets_sentiment = read_tweets(tweetsfile, tweetsfile_clean)
| mit |
wolfiex/DSMACC-testing | dsmacc/parsekpp/reactiontypes.py | 1 | 3417 | name = "reformat"
#from ..helperscripts import picker
import glob,sys,os,re,pathos
import pandas as pd
import numpy as np
from sympy import Symbol, expand, N
def iseqn(x):
#find equations
if (re.search(r'\{[\. \s\d]*\}', x)):
return True
def pool_eqn(x):
#sort the reactions
r,p=x[0].split('=')
p=p.split('+')
p.sort()
r=r.split('+')
r.sort()
#p='+'.join(p)
#r='+'.join(r)
x[0] = [r,p]
#replace D and exp for sympy re.sub(r'(\d)[dD]([+-\.\d])',r'\1e\2', x[1].split('//')[0].replace('EXP','exp')
x[1] = x[1].split('//')[0].replace(';','')
return x
def categorise(x,join=True):
cat2 = 'Radicals/Other'
if 'RO2' in x[1]:
cat = re.search(r'RO2[\w]*\b',x[1]).group()
cat2 = 'RO2'
elif 'J(' in x[1]:
cat = 'hv'
cat2 = 'Photolysis'
elif '*O2' in x[1] :
cat = 'O2'
cat2 = 'Decomposition'
elif 'H2O' in x[1] :
cat = 'H2O'
else:
radical = set(x[0][0]) & set('OH,HO2,NO,NO2,NO3,Cl,CL,O3'.split(','))
if len(radical):
cat = list(radical)[0]
else:
try: cat = re.search(r'K[\w]*\b',x[1]).group()
except: cat = 'Uni-molecular'
cat2 = 'Decomposition'
if join:
return ['->'.join(['+'.join(i) for i in x[0] ]) , x[1] , cat,cat2]
else:
return [x[0][0],x[0][1], x[1] , cat,cat2]
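# --- Editor-added illustration (not part of the original module) ---
# Feeding categorise() one hand-built entry in the [[reactants, products],
# rate] layout produced by pool_eqn(); the reaction and rate string are made
# up.
def _example_categorise():
    entry = [[['C3H8', 'OH'], ['H2O', 'IC3H7O2']], 'K298*EXP(-580/TEMP)']
    # OH is the recognised radical, so the category is 'OH' and the group
    # 'Radicals/Other'.
    return categorise(entry, join=True)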
def reformat_kpp(file_list = False,findcat = True,join=True ,inorganics=False,available_cores = 1):
if not file_list:
        #read files interactively via picker (requires the commented-out
        #"from ..helperscripts import picker" import at the top of this file)
file_list = picker.Picker('mechanisms/[!(formatted)]*.kpp',remove=['mechanisms/','.kpp'],title = 'Select Mechanisms').getSelected()
file_text = [open('mechanisms/%s.kpp'%i,'r').read() for i in file_list]
if inorganics:
file_list.append('inorganics')
file_text.append(open('src/background/inorganic_mcm.kpp','r').read())
if file_list == ['inorganics']: sys.exit('You forgot to enter a file to reformat')
fullstr='~'.join(file_text)
else:
#read given files
file_text = [open('mechanisms/%s.kpp'%i,'r').read() for i in file_list]
fullstr='~'.join(file_text)
minfull = re.sub(r' |\n|\t|\s|\r','', fullstr).upper()
eqn = [i.split(':') for i in re.findall(r'[^/]{1,2}\s*\{[\.\W\s\d]*?\}([^;]+)' ,' '+minfull,re.S|re.M)]
nocoeff = re.compile(r'\b\d*\.*\d*([\W\d\w]+)\b')
specs = []
for e in eqn: specs.extend(re.findall(r"[\w']+", e[0]))
specs = list(set((nocoeff.sub(r'\1',i) for i in specs)))
specs.sort()
eqn = list(map(pool_eqn,eqn))
if findcat:
cat2 = lambda p: categorise(p, join)
eqn = list(map(cat2, eqn))
if join:
jn = 'eqn,rate,category,group'
else:
jn = 'from,to,rate,category,group'
#print eqn
return pd.DataFrame(eqn,columns=jn.split(','))
else:
return eqn
if __name__ == '__main__':
    print ("let's go - quickstart test of formatted_butane_inorganics_True.kpp")
ret = reformat_kpp(['formatted_butane_inorganics_True.kpp'])
tally = ret.groupby(['category','group']).count()
#filter lone reactions
tally = tally[tally.eqn>1]
tally.index = ['_'.join([j,i]).strip() for i,j in tally.index.values]
tally.sort_index(inplace=True)
print((tally['eqn'].to_json()))
print((set(ret.group)))
#tally /= tally.sum()
| gpl-3.0 |
SENeC-Initiative/PyNeurActiv | plot/raster.py | 2 | 7089 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2017 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# nest_plot.py
# This file is part of the NNGT module
# Distributed as free software, in the hope that it will be useful, under the
# terms of the GNU General Public License.
""" Utility functions to plot Nactivity """
from itertools import cycle
import numpy as np
from PyNeurActiv.analysis import (ActivityRecord, get_b2, firing_rate,
neurons_sorter)
from .plot_tools import _markers
from ..lib import nonstring_container
#-----------------------------------------------------------------------------#
# Plotting the activity
#------------------------
#
def raster_plot(activity, network=None, limits=None, sort=None, normalize=1.,
decimate=None, hist=True, mark_network_bursts=True,
mark_spike_patterns=True, title=None, axis=None, label=None,
markers=None, show=False):
'''
Plot the monitored activity.
Parameters
----------
activity : :class:`~PyNeurActiv.analysis.ActivityRecord` or (N, 2) array
Recorded spikes and senders, either from `raster_analysis` or directly
from a 2d array. If an `ActivityRecord` is provided, then the names
of the columns should be stated if they differ from default
'neuron'/'time'.
network : :class:`~nngt.Network` or subclass, optional (default: None)
Network for which the activity was monitored.
limits : tuple, optional (default: None)
Time limits of the plot (if not specified, times of first and last
spike for raster plots).
    show : bool, optional (default: False)
Whether to show the plot right away or to wait for the next plt.show().
sort : str or list, optional (default: None)
Sorting method that will be used to attribute a new index to each
neuron. See :func:`~PyNeurActiv.analysis.neurons_sorter`.
normalize : float, optional (default: None)
Normalize the recorded results by a given float.
decimate : int or list of ints, optional (default: None)
Represent only a fraction of the spiking neurons; only one neuron in
`decimate` will be represented (e.g. setting `decimate` to 5 will lead
to only 20% of the neurons being represented). If a list is provided,
it must have one entry per NeuralGroup in the population.
hist : bool, optional (default: True)
Whether to display the histogram when plotting spikes rasters.
title : str, optional (default: None)
Title of the plot.
    axis : matplotlib axis, optional (default: None)
        Plot the activity on an existing matplotlib axis; if None, a new
        figure and axis are created.
label : str, optional (default: None)
Add a label to the plot.
Warning
-------
Sorting with "firing_rate" only works if NEST gids form a continuous
integer range.
Returns
-------
axis : list
List of the figure numbers.
'''
import matplotlib.pyplot as plt
if not isinstance(activity, ActivityRecord):
datadict = {'neuron': activity[:, 0], 'time': activity[:, 1]}
activity = ActivityRecord(datadict)
senders, times = activity.data.T
senders = senders.astype(int)
num_spikes = len(times)
neurons = np.unique(senders).astype(int)
num_neurons = len(neurons)
colors = cycle(('r','g','b','k','y','gray'))
# markers
if markers is None and mark_network_bursts:
markers = _markers
elif not nonstring_container(markers):
markers = cycle([markers])
else:
markers = cycle(['o'])
# decimate if necessary
if decimate is not None:
idx_keep = np.where(np.mod(senders, decimate) == 0)[0]
senders = senders[idx_keep]
times = times[idx_keep]
# sorting
sorter = np.arange(neurons[-1] + 1, dtype=int)
if sort == "spikes":
sorter = activity._sort
elif sort is not None:
sorter = neurons_sorter(
neurons, sort, data=activity.data, network=network)
if len(times):
if axis is None:
_, axis = plt.subplots()
ylabel = "Neuron ID"
xlabel = "Time (ms)"
show_burst = mark_spike_patterns * ('individual_burst' in activity)
cburst = 'r' if show_burst else 'b'
cspike = 'b'
delta_t = 0.01*(times[-1]-times[0])
burst = np.ones(num_spikes, dtype=bool)
interburst = np.zeros(num_spikes, dtype=bool)
iburst = np.ones(num_spikes, dtype=bool)
ispike = np.zeros(num_spikes, dtype=bool)
descriptor1, descriptor2 = np.ones(num_spikes), np.ones(num_spikes)
try:
num_events = int(np.nanmax(activity.array('network_burst')))
descriptor1 = activity.array('network_burst')
descriptor2 = activity.array('network_interburst')
except:
num_events = 1
if show_burst:
iburst = ~np.isnan(activity.array('individual_burst'))
for c, ev, m in zip(colors, range(num_events), markers):
burst = np.isclose(descriptor1, ev + 1)
interburst = np.isclose(descriptor2, ev + 1)
bb_idx = np.where(iburst & burst)[0]
bs_idx = np.where(~iburst & burst)[0]
axis.plot(times[bb_idx], sorter[senders[bb_idx]],
ls='', marker=m, c=cburst)
axis.plot(times[bs_idx], sorter[senders[bs_idx]],
ls='', marker=m, c=cspike)
# interburst (empty if no bursts)
ib_idx = np.where(iburst & interburst)[0]
is_idx = np.where(~iburst & interburst)[0]
axis.plot(times[ib_idx], sorter[senders[ib_idx]],
ls='', marker=m, c=cburst, fillstyle='none')
axis.plot(times[is_idx], sorter[senders[is_idx]],
ls='', marker=m, c=cspike, fillstyle='none')
axis.set_ylabel(ylabel)
axis.set_xlabel(xlabel)
if limits is not None:
axis.set_xlim(limits)
else:
axis.set_xlim([times[0]-delta_t, times[-1]+delta_t])
axis.legend(bbox_to_anchor=(1.1, 1.2))
if title is None:
title = 'Raster plot'
plt.title(title)
if show:
plt.show()
return axis
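# --- Editor-added illustration (not part of the original module) ---
# Calling raster_plot() on synthetic (sender id, spike time) pairs without a
# network; the spike data are random and only meant to exercise the plain
# raster code path.
def _example_raster_plot():
    rng = np.random.RandomState(0)
    spikes = np.column_stack((rng.randint(0, 50, 500),              # sender ids
                              np.sort(rng.uniform(0., 1e3, 500))))  # times (ms)
    return raster_plot(spikes, sort=None, show=False)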
| gpl-3.0 |
yavuzovski/playground | machine learning/Udacity/ud120-projects/outliers/outlier_removal_regression.py | 1 | 2785 | #!/usr/bin/python
import random
import numpy
import matplotlib.pyplot as plt
import pickle
from outlier_cleaner import outlierCleaner
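# outlierCleaner() lives in the course's outlier_cleaner.py and is not shown
# here. As a hedged sketch, a common implementation keeps the 90% of points
# with the smallest squared residuals, roughly like the hypothetical helper
# below (the name and exact behaviour are assumptions).
def _example_outlier_cleaner(predictions, ages, net_worths):
    errors = (net_worths - predictions) ** 2
    data = sorted(zip(ages, net_worths, errors), key=lambda t: float(t[2]))
    return data[:int(len(data) * 0.9)]   # drop the worst 10%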
### load up some practice data with outliers in it
ages = pickle.load( open("practice_outliers_ages.pkl", "r") )
net_worths = pickle.load( open("practice_outliers_net_worths.pkl", "r") )
### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points
### and n_columns is the number of features
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)
### fill in a regression here! Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(ages_train, net_worths_train)
print("slope of the uncleaned data: {}".format(reg.coef_))
print("score of the uncleaned data: {}".format(reg.score(ages_test, net_worths_test)))
try:
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
pass
plt.scatter(ages, net_worths)
plt.show()
### identify and remove the most outlier-y points
cleaned_data = []
try:
predictions = reg.predict(ages_train)
cleaned_data = outlierCleaner( predictions, ages_train, net_worths_train )
except NameError:
print "your regression object doesn't exist, or isn't name reg"
print "can't make predictions to use in identifying outliers"
### only run this code if cleaned_data is returning data
if len(cleaned_data) > 0:
ages, net_worths, errors = zip(*cleaned_data)
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
### refit your cleaned data!
try:
reg.fit(ages, net_worths)
print("slope of the cleaned data: {}".format(reg.coef_))
print("score of the cleaned data: {}".format(reg.score(ages_test, net_worths_test)))
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
print "you don't seem to have regression imported/created,"
print " or else your regression object isn't named reg"
print " either way, only draw the scatter plot of the cleaned data"
plt.scatter(ages, net_worths)
plt.xlabel("ages")
plt.ylabel("net worths")
plt.show()
else:
print "outlierCleaner() is returning an empty list, no refitting to be done"
| gpl-3.0 |
pbvarga1/pdsview | tests/test_histogram.py | 3 | 11774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from qtpy import QtWidgets, QtCore
from matplotlib.lines import Line2D
from pdsview import pdsview, histogram
FILE_1 = os.path.join(
'tests', 'mission_data', '1p190678905erp64kcp2600l8c1.img')
FILE_2 = os.path.join(
'tests', 'mission_data', '2p129641989eth0361p2600r8m1.img')
FILE_3 = os.path.join(
'tests', 'mission_data', '1p134482118erp0902p2600r8m1.img')
test_images = pdsview.ImageSet([FILE_1, FILE_2])
window = pdsview.PDSViewer(test_images)
image_view = window.view_canvas
def test_model_init():
model = histogram.HistogramModel(image_view)
assert model._image_view == image_view
assert model._views == set()
assert model._cut_low is None
assert model._cut_high is None
assert model._bins == 100
def test_model_image_view():
image_view = window.view_canvas
model = histogram.HistogramModel(image_view)
model.image_view == image_view
model.image_view == model._image_view
# Test setter method
image_view2 = pdsview.PDSViewer(pdsview.ImageSet([FILE_3])).view_canvas
model.image_view = image_view2
assert model.image_view == image_view2
def test_model_cut_low():
model = histogram.HistogramModel(image_view)
assert model.cut_low == model.view_cuts[0]
assert model.cut_low == model._cut_low
# Test Setting
model.cut_low = 42
assert model.cut_low == 42
assert model._cut_low == 42
assert model.view_cuts[0] == 42
def test_model_cut_high():
model = histogram.HistogramModel(image_view)
assert model.cut_high is model.view_cuts[1]
assert model.cut_high == model._cut_high
# Test Setting
model.cut_high = 42
assert model.cut_high == 42
assert model._cut_high == 42
assert model.view_cuts[1] == 42
def test_model_cuts():
def test_new_cuts(new_cuts, model):
model.cuts = new_cuts
assert model.cuts == new_cuts
assert model.cut_low == new_cuts[0]
assert model.cut_high == new_cuts[1]
assert model.view_cuts == new_cuts
model = histogram.HistogramModel(image_view)
assert model.cuts == model.view_cuts
# Test Setter
test_new_cuts((24, 42), model)
test_new_cuts((20, 42), model)
test_new_cuts((20, 40), model)
with pytest.warns(UserWarning):
model.cuts = 42, 24
assert model.cuts == (24, 42)
def test_model_view_cuts():
model = histogram.HistogramModel(image_view)
assert model.view_cuts == image_view.get_cut_levels()
def test_bins():
model = histogram.HistogramModel(image_view)
assert model.bins == model._bins
# Test Setter
model.bins = 42
assert model.bins == 42
assert model.bins == model._bins
def test_model_data():
model = histogram.HistogramModel(image_view)
assert np.array_equal(model.data, image_view.get_image().data)
def test_model_register():
model = histogram.HistogramModel(image_view)
mock_view = QtWidgets.QWidget()
model.register(mock_view)
assert mock_view in model._views
def test_model_unregister():
model = histogram.HistogramModel(image_view)
mock_view = QtWidgets.QWidget()
model.register(mock_view)
assert mock_view in model._views
model.unregister(mock_view)
assert mock_view not in model._views
def test_model_restore():
model = histogram.HistogramModel(image_view)
assert model.cuts == model.view_cuts
image_view.cut_levels(24, 42)
model.cuts = 10, 100
model.restore()
assert model.cuts == model.view_cuts
def test_model__set_view_cuts():
model = histogram.HistogramModel(image_view)
model._cut_low = 24
model._cut_high = 42
model._set_view_cuts()
assert model.view_cuts == (24, 42)
def test_controller_set_cut_low():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_controller = histogram.HistogramController(model, test_hist)
test_controller.set_cut_low(24)
assert model.cut_low == 24
assert model.view_cuts[0] == 24
def test_controller_set_cut_high():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_controller = histogram.HistogramController(model, test_hist)
test_controller.set_cut_high(42)
assert model.cut_high == 42
assert model.view_cuts[1] == 42
def test_controller_set_cuts():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_controller = histogram.HistogramController(model, test_hist)
test_controller.set_cuts(10, 100)
assert model.cut_low == 10
assert model.cut_high == 100
assert model.cuts == (10, 100)
assert model.view_cuts == (10, 100)
def test_controller_set_bins():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_controller = histogram.HistogramController(model, test_hist)
test_controller.set_bins(50)
assert model.bins == 50
def test_controller_restore():
model = histogram.HistogramModel(image_view)
def_cuts = model.view_cuts
test_hist = histogram.Histogram(model)
test_controller = histogram.HistogramController(model, test_hist)
model.cuts = 24, 42
image_view.cut_levels(*def_cuts)
test_controller.restore()
assert model.cuts != (24, 42)
assert model.cuts == def_cuts
assert model.view_cuts == def_cuts
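# --- Editor-added illustration (not part of the original tests) ---
# The model/view/controller wiring exercised above, reduced to a minimal
# plain-Python observer sketch (this is not the pdsview API).
class _ToyModel(object):
    def __init__(self):
        self._views, self.cuts = set(), (0, 255)
    def register(self, view):
        self._views.add(view)
    def set_cuts(self, low, high):
        self.cuts = (low, high)
        for view in self._views:
            view.change_cuts()   # each registered view redraws itself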
def test_histogram_init():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
assert test_hist.model == model
assert test_hist in model._views
assert test_hist.sizePolicy().hasHeightForWidth()
assert test_hist._right_vline is None
assert test_hist._left_vline is None
def test_histogram_set_vlines():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_hist._set_vlines()
assert isinstance(test_hist._left_vline, Line2D)
assert isinstance(test_hist._right_vline, Line2D)
assert test_hist._left_vline.get_xdata()[0] == model.cut_low
assert test_hist._right_vline.get_xdata()[0] == model.cut_high
def test_histogram_change_cut_low():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_hist._set_vlines()
model._cut_low = 24
test_hist.change_cut_low(draw=False)
assert test_hist._left_vline.get_xdata()[0] == 24
assert test_hist._right_vline.get_xdata()[0] == model.cut_high
def test_histogram_change_cut_high():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_hist._set_vlines()
model._cut_high = 42
test_hist.change_cut_high(draw=False)
assert test_hist._right_vline.get_xdata()[0] == 42
assert test_hist._left_vline.get_xdata()[0] == model.cut_low
def test_histogram_change_cuts():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_hist._set_vlines()
model._cut_low = 24
model._cut_high = 42
test_hist.change_cuts()
assert test_hist._left_vline.get_xdata()[0] == 24
assert test_hist._right_vline.get_xdata()[0] == 42
def test_histogram_change_bins():
model = histogram.HistogramModel(image_view)
test_hist = histogram.Histogram(model)
test_hist.set_data()
assert model.bins == 100
assert len(test_hist._ax.patches) == 100
model._bins = 50
test_hist.change_bins()
assert len(test_hist._ax.patches) == 50
def get_xdata(ax, x):
    # Convert a data-space x coordinate to display space (helper intended for
    # the commented-out move-line test below).
    xdata, _ = ax.transData.transform((x, 10))
    return xdata
# def test_histogram_move_line(qtbot):
#     """Testing the move line is much more difficult than I thought.
#     Passing in the correct data points is very tough and I can't
#     figure out exactly how to do so."""
def test_histogram_widget_change_cut_low():
model = histogram.HistogramModel(image_view)
test_hist_widget = histogram.HistogramWidget(model)
new_cut_low = model.cut_low - 3
model._cut_low = new_cut_low
test_hist_widget.change_cut_low()
assert float(test_hist_widget._cut_low_box.text()) == new_cut_low
new_cut_low += 1.2
model._cut_low = new_cut_low
test_hist_widget.change_cut_low()
assert float(test_hist_widget._cut_low_box.text()) == new_cut_low
def test_histogram_widget_change_cut_high():
model = histogram.HistogramModel(image_view)
test_hist_widget = histogram.HistogramWidget(model)
new_cut_high = model.cut_high + 3
model._cut_high = new_cut_high
test_hist_widget.change_cut_high()
assert float(test_hist_widget._cut_high_box.text()) == new_cut_high
new_cut_high -= 1.2
model._cut_high = new_cut_high
test_hist_widget.change_cut_high()
assert float(test_hist_widget._cut_high_box.text()) == new_cut_high
def test_histogram_widget_change_cuts():
model = histogram.HistogramModel(image_view)
test_hist_widget = histogram.HistogramWidget(model)
new_cut_high = model.cut_high + 3
model._cut_high = new_cut_high
new_cut_low = model.cut_low - 3
model._cut_low = new_cut_low
test_hist_widget.change_cuts()
assert float(test_hist_widget._cut_low_box.text()) == new_cut_low
assert float(test_hist_widget._cut_high_box.text()) == new_cut_high
new_cut_high -= 1.2
model._cut_high = new_cut_high
new_cut_low += 1.2
model._cut_low = new_cut_low
test_hist_widget.change_cuts()
assert float(test_hist_widget._cut_low_box.text()) == new_cut_low
assert float(test_hist_widget._cut_high_box.text()) == new_cut_high
def test_histogram_widget_change_bins():
model = histogram.HistogramModel(image_view)
test_hist_widget = histogram.HistogramWidget(model)
new_bins = model.bins + 20
model._bins = new_bins
test_hist_widget.change_bins()
assert int(test_hist_widget._bins_box.text()) == new_bins
def test_histogram_widget_keyPressEvent(qtbot):
window.show()
qtbot.addWidget(window.histogram_widget)
qtbot.addWidget(window)
# Change only cut low
new_cut_low = window.histogram.cut_low - 3
window.histogram_widget._cut_low_box.setText("%.3f" % (new_cut_low))
qtbot.keyPress(window.histogram_widget, QtCore.Qt.Key_Return)
assert window.histogram.cut_low == new_cut_low
# Change only cut high
new_cut_high = window.histogram.cut_high + 3
window.histogram_widget._cut_high_box.setText("%.3f" % (new_cut_high))
qtbot.keyPress(window.histogram_widget, QtCore.Qt.Key_Return)
assert window.histogram.cut_high == new_cut_high
# Change both cuts
new_cut_low += 1.5
new_cut_high -= 1.5
window.histogram_widget._cut_low_box.setText("%.3f" % (new_cut_low))
window.histogram_widget._cut_high_box.setText("%.3f" % (new_cut_high))
qtbot.keyPress(window.histogram_widget, QtCore.Qt.Key_Return)
assert window.histogram.cut_low == new_cut_low
assert window.histogram.cut_high == new_cut_high
# Change the bins
new_bins = window.histogram.bins + 50
window.histogram_widget._bins_box.setText("%d" % (new_bins))
qtbot.keyPress(window.histogram_widget, QtCore.Qt.Key_Return)
assert window.histogram.bins == new_bins
assert window.histogram.cut_low == new_cut_low
assert window.histogram.cut_high == new_cut_high
# Change all
new_cut_low += 1.5
new_cut_high -= 1.5
window.histogram_widget._cut_low_box.setText("%.3f" % (new_cut_low))
window.histogram_widget._cut_high_box.setText("%.3f" % (new_cut_high))
new_bins -= 25
window.histogram_widget._bins_box.setText("%d" % (new_bins))
qtbot.keyPress(window.histogram_widget, QtCore.Qt.Key_Return)
assert window.histogram.bins == new_bins
assert window.histogram.cut_low == new_cut_low
assert window.histogram.cut_high == new_cut_high
| bsd-3-clause |
avalentino/pyepr | doc/examples/export_gdalvrt.py | 1 | 5903 | #!/usr/bin/env python3
import os
import epr
from osgeo import gdal
epr_to_gdal_type = {
epr.E_TID_UNKNOWN: gdal.GDT_Unknown,
epr.E_TID_UCHAR: gdal.GDT_Byte,
epr.E_TID_CHAR: gdal.GDT_Byte,
epr.E_TID_USHORT: gdal.GDT_UInt16,
epr.E_TID_SHORT: gdal.GDT_Int16,
epr.E_TID_UINT: gdal.GDT_UInt32,
epr.E_TID_INT: gdal.GDT_Int32,
epr.E_TID_FLOAT: gdal.GDT_Float32,
epr.E_TID_DOUBLE: gdal.GDT_Float64,
#epr.E_TID_STRING: gdal.GDT_Unknown,
#epr.E_TID_SPARE: gdal.GDT_Unknown,
#epr.E_TID_TIME: gdal.GDT_Unknown,
}
def epr2gdal_band(band, vrt):
product = band.product
dataset = band.dataset
record = dataset.read_record(0)
field = record.get_field_at(band._field_index - 1)
ysize = product.get_scene_height()
xsize = product.get_scene_width()
if isinstance(vrt, gdal.Dataset):
if (vrt.RasterYSize, vrt.RasterXSize) != (ysize, xsize):
raise ValueError('dataset size do not match')
gdal_ds = vrt
elif os.path.exists(vrt):
gdal_ds = gdal.Open(vrt, gdal.GA_Update)
if gdal_ds is None:
raise RuntimeError('unable to open "{}"'.format(vrt))
driver = gdal_ds.GetDriver()
if driver.ShortName != 'VRT':
raise TypeError('unexpected GDAL driver ({}). '
'VRT driver expected'.format(driver.ShortName))
else:
driver = gdal.GetDriverByName('VRT')
if driver is None:
raise RuntimeError('unable to get driver "VRT"')
gdal_ds = driver.Create(vrt, xsize, ysize, 0)
if gdal_ds is None:
raise RuntimeError('unable to create "{}" dataset'.format(vrt))
    filename = os.sep.join(product.file_path.split('/'))  # denormalize
offset = dataset.get_dsd().ds_offset + field.get_offset()
line_offset = record.tot_size
pixel_offset = epr.get_data_type_size(field.get_type())
if band.sample_model == epr.E_SMOD_1OF2:
pixel_offset *= 2
elif band.sample_model == epr.E_SMOD_2OF2:
offset += pixel_offset
pixel_offset *= 2
options = [
'subClass=VRTRawRasterBand',
'SourceFilename={}'.format(filename),
'ImageOffset={}'.format(offset),
'LineOffset={}'.format(line_offset),
'PixelOffset={}'.format(pixel_offset),
'ByteOrder=MSB',
]
gtype = epr_to_gdal_type[field.get_type()]
ret = gdal_ds.AddBand(gtype, options=options)
if ret != gdal.CE_None:
raise RuntimeError(
'unable to add VRTRawRasterBand to "{}"'.format(vrt))
gdal_band = gdal_ds.GetRasterBand(gdal_ds.RasterCount)
gdal_band.SetDescription(band.description)
metadata = {
'name': band.get_name(),
'dataset_name': dataset.get_name(),
'dataset_description': dataset.description,
'lines_mirrored': str(band.lines_mirrored),
'sample_model': epr.get_sample_model_name(band.sample_model),
'scaling_factor': str(band.scaling_factor),
'scaling_offset': str(band.scaling_offset),
'scaling_method': epr.get_scaling_method_name(band.scaling_method),
'spectr_band_index': str(band.spectr_band_index),
'unit': band.unit if band.unit else '',
'bm_expr': band.bm_expr if band.bm_expr else '',
}
gdal_band.SetMetadata(metadata)
return gdal_ds
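# --- Editor-added illustration (not part of the original example) ---
# How the raw-band offsets computed above differ for the two halves of an
# interleaved (1-of-2 / 2-of-2) dataset; the numbers are made up.
def _example_interleaved_offsets(base_offset=1024, sample_size=4):
    first = {'ImageOffset': base_offset, 'PixelOffset': 2 * sample_size}
    second = {'ImageOffset': base_offset + sample_size,
              'PixelOffset': 2 * sample_size}
    return first, second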
def epr2gdal(product, vrt, overwrite_existing=False):
if isinstance(product, str):
filename = product
product = epr.open(filename)
ysize = product.get_scene_height()
xsize = product.get_scene_width()
if os.path.exists(vrt) and not overwrite_existing:
raise ValueError('unable to create "{0}". Already exists'.format(vrt))
driver = gdal.GetDriverByName('VRT')
if driver is None:
raise RuntimeError('unable to get driver "VRT"')
gdal_ds = driver.Create(vrt, xsize, ysize, 0)
if gdal_ds is None:
raise RuntimeError('unable to create "{}" dataset'.format(vrt))
metadata = {
'id_string': product.id_string,
'meris_iodd_version': str(product.meris_iodd_version),
'dataset_names': ','.join(product.get_dataset_names()),
'num_datasets': str(product.get_num_datasets()),
'num_dsds': str(product.get_num_dsds()),
}
gdal_ds.SetMetadata(metadata)
mph = product.get_mph()
metadata = str(mph).replace(' = ', '=').split('\n')
gdal_ds.SetMetadata(metadata, 'MPH')
sph = product.get_sph()
metadata = str(sph).replace(' = ', '=').split('\n')
gdal_ds.SetMetadata(metadata, 'SPH')
for band in product.bands():
epr2gdal_band(band, gdal_ds)
# @TODO: set geographic info
return gdal_ds
if __name__ == '__main__':
filename = 'MER_LRC_2PTGMV20000620_104318_00000104X000_00000_00000_0001.N1'
vrtfilename = os.path.splitext(filename)[0] + '.vrt'
gdal_ds = epr2gdal(filename, vrtfilename)
with epr.open(filename) as product:
band_index = product.get_band_names().index('water_vapour')
band = product.get_band('water_vapour')
eprdata = band.read_as_array()
unit = band.unit
lines_mirrored = band.lines_mirrored
scaling_offset = band.scaling_offset
scaling_factor = band.scaling_factor
gdal_band = gdal_ds.GetRasterBand(band_index + 1)
vrtdata = gdal_band.ReadAsArray()
if lines_mirrored:
vrtdata = vrtdata[:, ::-1]
vrtdata = vrtdata * scaling_factor + scaling_offset
print('Max absolute error:', abs(vrtdata - eprdata).max())
# plot
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(eprdata)
plt.grid(True)
cb = plt.colorbar()
cb.set_label(unit)
plt.title('EPR data')
plt.subplot(2, 1, 2)
plt.imshow(vrtdata)
plt.grid(True)
cb = plt.colorbar()
cb.set_label(unit)
plt.title('VRT data')
plt.show()
| gpl-3.0 |
josauder/procedural_city_generation | procedural_city_generation/roadmap/config.py | 2 | 3544 | # -*- coding:utf-8 -*-
from __future__ import division
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import os
class Global_Lists:
def __init__(self):
self.vertex_list=[]
self.vertex_queue=[]
self.tree=None
def config():
"""
    Starts the program: reads the inputs, creates the Singleton objects,
    sets up the heightmap for later use, and makes sure every Vertex in the
    axiom has the correct neighbour. Could use a rework in which the
    Singletons are unified and no longer split up as they are now.
Returns
-------
    singleton : Singleton object
        Singleton holding both the numeric values which are not to be changed
        at runtime and the global lists which will be altered at runtime.
"""
import json
from collections import namedtuple
import os
import procedural_city_generation
from procedural_city_generation.additional_stuff.Singleton import Singleton
path=os.path.dirname(procedural_city_generation.__file__)
singleton=Singleton("roadmap")
#Creates Singleton-Variables object from namedtuple
from procedural_city_generation.roadmap.Vertex import Vertex, set_plotbool
#Creates Vertex objects from coordinates
singleton.axiom=[Vertex(np.array([float(x[0]), float(x[1])])) for x in singleton.axiom]
singleton.border=np.array([singleton.border_x, singleton.border_y])
set_plotbool(singleton.plot)
#Finds the longest possible length of a connection between to vertices
singleton.maxLength=max(singleton.radiallMax, singleton.gridlMax, singleton.organiclMax, singleton.minor_roadlMax, singleton.seedlMax)
from procedural_city_generation.roadmap.config_functions.input_image_setup import input_image_setup
singleton.img, singleton.img2=input_image_setup(singleton.rule_image_name, singleton.density_image_name)
with open (path+"/temp/"+singleton.output_name+"_densitymap.txt", 'w') as f:
f.write(singleton.density_image_name.split(".")[0]+"diffused.png")
from procedural_city_generation.roadmap.config_functions.find_radial_centers import find_radial_centers
singleton.center=find_radial_centers(singleton)
singleton.center= [np.array([singleton.border[0]*((x[1]/singleton.img.shape[1])-0.5)*2, singleton.border[1]*(((singleton.img.shape[0]-x[0])/singleton.img.shape[0])-0.5)*2]) for x in singleton.center]
from procedural_city_generation.roadmap.config_functions.setup_heightmap import setup_heightmap
setup_heightmap(singleton, path)
singleton.global_lists=Global_Lists()
singleton.global_lists.vertex_list.extend(singleton.axiom)
singleton.global_lists.coordsliste=[x.coords for x in singleton.global_lists.vertex_list]
def setNeighbours(vertex):
""" Correctly Sets up the neighbors for a vertex from the axiom.
Parameters
----------
vertex : vertex Object
"""
d=np.inf
neighbour=None
for v in singleton.axiom:
if v is not vertex:
dneu=np.linalg.norm(v.coords-vertex.coords)
if dneu<d:
d=dneu
neighbour=v
vertex.neighbours=[neighbour]
for k in singleton.axiom:
setNeighbours(k)
from scipy.spatial import cKDTree
singleton.global_lists.tree=cKDTree(singleton.global_lists.coordsliste, leafsize=160)
return singleton
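# --- Editor-added illustration (not part of the original module) ---
# The nearest-neighbour lookup that the cKDTree built above makes cheap,
# shown on made-up 2-D vertex coordinates.
def _example_nearest_vertex():
    from scipy.spatial import cKDTree
    coords = np.array([[0., 0.], [1., 0.], [0., 2.]])
    tree = cKDTree(coords, leafsize=160)
    distance, index = tree.query([0.9, 0.1])
    return distance, index   # the nearest vertex is coords[1]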
| mpl-2.0 |
RPGOne/Skynet | test_estimators.py | 2 | 2065 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import random
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import skflow
class CustomOptimizer(tf.test.TestCase):
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
custom_optimizer = lambda x: tf.train.MomentumOptimizer(x, 0.9)
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=800,
learning_rate=exp_decay,
optimizer=custom_optimizer)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| bsd-3-clause |
etherkit/OpenBeacon2 | client/macos/venv/lib/python3.8/site-packages/PyInstaller/hooks/hook-matplotlib.backends.py | 4 | 3224 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.compat import is_darwin
from PyInstaller.utils.hooks import (
eval_statement, exec_statement, logger)
def get_matplotlib_backend_module_names():
"""
List the names of all matplotlib backend modules importable under the
current Python installation.
Returns
----------
list
List of the fully-qualified names of all such modules.
"""
# Statement safely importing a single backend module.
import_statement = """
import os, sys
# Preserve stdout.
sys_stdout = sys.stdout
try:
# Redirect output printed by this importation to "/dev/null", preventing
# such output from being erroneously interpreted as an error.
with open(os.devnull, 'w') as dev_null:
sys.stdout = dev_null
__import__('%s')
# If this is an ImportError, print this exception's message without a traceback.
# ImportError messages are human-readable and require no additional context.
except ImportError as exc:
sys.stdout = sys_stdout
print(exc)
# Else, print this exception preceded by a traceback. traceback.print_exc()
# prints to stderr rather than stdout and must not be called here!
except Exception:
sys.stdout = sys_stdout
import traceback
print(traceback.format_exc())
"""
# List of the human-readable names of all available backends.
backend_names = eval_statement(
'import matplotlib; print(matplotlib.rcsetup.all_backends)')
# List of the fully-qualified names of all importable backend modules.
module_names = []
# If the current system is not OS X and the "CocoaAgg" backend is available,
# remove this backend from consideration. Attempting to import this backend
# on non-OS X systems halts the current subprocess without printing output
# or raising exceptions, preventing its reliable detection.
if not is_darwin and 'CocoaAgg' in backend_names:
backend_names.remove('CocoaAgg')
# For safety, attempt to import each backend in a unique subprocess.
for backend_name in backend_names:
module_name = 'matplotlib.backends.backend_%s' % backend_name.lower()
stdout = exec_statement(import_statement % module_name)
# If no output was printed, this backend is importable.
if not stdout:
module_names.append(module_name)
logger.info(' Matplotlib backend "%s": added' % backend_name)
else:
logger.info(' Matplotlib backend "%s": ignored\n %s' % (backend_name, stdout))
return module_names
# Freeze all importable backends, as PyInstaller is unable to determine exactly
# which backends are required by the current program.
hiddenimports = get_matplotlib_backend_module_names()
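# --- Editor-added illustration (not part of the original hook) ---
# The probe-in-a-subprocess idea used above, reduced to plain subprocess
# calls instead of the PyInstaller helpers.
def _example_probe_importable(module_name):
    import os
    import subprocess
    import sys
    with open(os.devnull, 'w') as devnull:
        returncode = subprocess.call([sys.executable, '-c',
                                      'import {0}'.format(module_name)],
                                     stdout=devnull, stderr=devnull)
    return returncode == 0   # True if the module imported cleanly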
| gpl-3.0 |
ashhher3/scikit-learn | sklearn/linear_model/logistic.py | 6 | 55848 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
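# --- Editor-added illustration (not part of scikit-learn) ---
# _intercept_dot() on a tiny dense problem with an appended intercept term;
# the numbers are made up.
def _example_intercept_dot():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 2.0])   # last entry is the intercept c
    # returns (array([0.5, -0.25]), 2.0, y * (X.dot([0.5, -0.25]) + 2.0))
    return _intercept_dot(w, X, y)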
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
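# --- Editor-added illustration (not part of scikit-learn) ---
# A finite-difference check of the analytic gradient above on small random
# data; the result should be close to zero if the gradient is correct.
def _example_check_logistic_grad():
    rng = np.random.RandomState(0)
    X, y, w = rng.randn(20, 5), np.sign(rng.randn(20)), rng.randn(5)
    grad_only = lambda w_, *args: _logistic_loss_and_grad(w_, *args)[1]
    return optimize.check_grad(_logistic_loss, grad_only, w, X, y, 1.)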
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss, gradient and the Hessian.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return out, grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_loss_grad_hess(w, X, Y, alpha, sample_weight):
"""
Provides multinomial loss, gradient, and a function for computing hessian
vector product.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return loss, grad, hessp
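# --- Editor-added illustration (not part of scikit-learn) ---
# Checking the Hessian-vector product returned above against a finite
# difference of the gradient on small random data; the returned value should
# be close to zero if hessp is correct.
def _example_check_multinomial_hessp(eps=1e-5):
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    Y = np.eye(3)[rng.randint(0, 3, 30)]   # one-hot labels for 3 classes
    w, sw = rng.randn(3 * 4), np.ones(30)
    _, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, 1., sw)
    v = rng.randn(w.size)
    grad_plus = _multinomial_loss_grad(w + eps * v, X, Y, 1., sw)[1]
    return np.max(np.abs(hessp(v) - (grad_plus - grad) / eps))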
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr'):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class can be either 'multinomial' or 'ovr'"
"got %s" % multi_class)
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers. got %s" % solver)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s cannot solve problems with "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties, got %s penalty." % penalty)
if dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"dual=False, got dual=%s" % dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing an OvR, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if not coef.size in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size)
)
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1
)
)
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_loss_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_loss_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter
)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol
)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
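# Usage sketch (illustrative only; the dataset and parameter values are
# assumptions, not part of this module):
#
#     from sklearn.datasets import make_classification
#     X, y = make_classification(n_samples=50, n_features=4, random_state=0)
#     coefs, Cs = logistic_regression_path(X, y, Cs=3, solver='lbfgs')
#     # ``coefs`` is a list of 3 arrays of shape (n_features + 1,), one per
#     # value in ``Cs``, warm-started along the regularization path.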
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
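# Usage sketch (illustrative assumption; X, y, a list of CV ``folds`` and a
# sorted ``classes`` array are assumed to be available): this helper is
# intended to be called once per (class, fold) pair, roughly as
#
#     train, test = folds[0]
#     coefs, Cs, scores = _log_reg_scoring_path(
#         X, y, train, test, pos_class=classes[1], Cs=5)
#     # scores[i] is the held-out score obtained with regularization Cs[i].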
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
See also
--------
sklearn.linear_model.SGDClassifier
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
if self.solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError(
"Logistic Regression supports only liblinear, newton-cg and "
"lbfgs solvers, Got solver=%s" % self.solver
)
if self.solver == 'liblinear' and self.multi_class == 'multinomial':
raise ValueError("Solver %s does not support a multinomial "
"backend." % self.solver)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class should be either ovr or multinomial "
"got %s" % self.multi_class)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol
)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
# For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
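# Minimal usage sketch (the dataset and parameter values below are
# assumptions for illustration, not part of this module):
#
#     from sklearn.datasets import load_iris
#     iris = load_iris()
#     clf = LogisticRegression(C=1.0, solver='lbfgs').fit(iris.data, iris.target)
#     proba = clf.predict_proba(iris.data[:3])  # shape (3, 3); rows sum to 1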
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path, i.e. guess the initial coefficients of the
present fit to be the coefficients obtained after convergence in the
previous fit, which in general makes it faster.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape (n_folds, len(Cs_), n_features) or
(n_folds, len(Cs_), n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape (n_folds, len(Cs_), n_features) or
(n_folds, len(Cs_), n_features + 1) depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.solver != 'liblinear':
if self.penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties.")
if self.dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"the primal form.")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class backend should be either "
"'ovr' or 'multinomial'"
" got %s" % self.multi_class)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning
)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = _check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight == 'auto'):
raise ValueError("class_weight provided should be a "
"dict or 'auto'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([
coefs_paths[i][best_indices[i]]
for i in range(len(folds))
], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
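# Minimal usage sketch (X and y are assumed training arrays; the parameter
# values are illustrative assumptions):
#
#     searcher = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
#     searcher.C_       # best inverse-regularization strength per class
#     searcher.scores_  # dict: class label -> (n_folds, len(Cs_)) score grid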
| bsd-3-clause |
em-er-es/rollo | Scripts/computeposition/compute_position.py | 1 | 10329 | #!/bin/env python2
# -*- coding: utf-8 -*-
#%% Information
# 86805 - Software Architectures for Robotics
# Assignment 03 - Localization system for a wheeled humanoid robot
# Rabbia Asghar, Ernest Skrzypczyk
# Odometry model
# Inputs: Initial coordinates of the robot (centre position), initial orientation of the robot in radians, and the rotation speeds n_L and n_R [rpm] of the left and right wheels, respectively
# Outputs: Final coordinates of the robot (centre position), final orientation of the robot [radians]
# Assumptions:
# 1. The robot is a rigid body
# 2. The model represents a differential drive robot
# 3. There is no wheel slip and the surface is planar
# 4. Both wheels are turning in the forward direction
#%% Import basic libraries
from __future__ import print_function #Python2
from matplotlib import pyplot as plt
#%% Import additional libraries for a script call and parse arguments
if __name__ == '__main__':
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(prog=os.path.basename(__file__))
#%% Movement arguments
parser.add_argument('-nL', '--rotation-speed-left', dest='n_L', type=float, default=10.0, help='Rotation speed of the left wheel', metavar='N_L <!10>[1/min]')
parser.add_argument('-nR', '--rotation-speed-right', dest='n_R', type=float, default=10.0, help='Rotation speed of the right wheel', metavar='N_R <!10>[1/min]')
parser.add_argument('-Px', '--initial-position-x', dest='P_i_x', type=float, default=0.0, help='Initial position of the robot - X coordinate', metavar='P_I_X <!0>[m]')
parser.add_argument('-Py', '--initial-position-y', dest='P_i_y', type=float, default=0.0, help='Initial position of the robot - Y coordinate', metavar='P_I_Y <!0>[m]')
parser.add_argument('-Th', '--initial-orientation', dest='Theta_i', type=float, default=0.0, help='Initial orientation of the robot - Theta', metavar='THETA <!0>[rad]')
parser.add_argument('-t', '--time', dest='t', type=float, default=10.0, help='Time for the movement', metavar='t <!10.0>[s]')
parser.add_argument('-dt', '--step-time', dest='dt', type=float, default=-100.0, help='Time step for the approximation if positive; number of steps if negative', metavar='t <-100.0>[{s, 1}]')
parser.add_argument('-rL', '--radius-wheel-left', dest='r_L', type=float, default=0.1, help='Radius of the left wheel', metavar='R_L <!0.100>[m]')
parser.add_argument('-rR', '--radius-wheel-right', dest='r_R', type=float, default=0.1, help='Radius of the right wheel', metavar='R_R <!0.100>[m]')
parser.add_argument('-al', '--axle-length', dest='axle_l', type=float, default=0.205, help='Distance between wheels - Length of the axle', metavar='AXLE_L <!0.205>[m]')
#%% Additional script relevant arguments
# parser.add_argument('-1', '-p', '--pause', dest='pause', action='store_true', help='Pause between processed images')
# parser.add_argument('-cp', '--color-plot', dest='colorPlot', type=bool, help='Colorize generated plot')
parser.add_argument('-gp', '--generate-plot', dest='generatePlot', action='store_true', default=0, help='Generate a plot of movement')
parser.add_argument('-d', '--degrees', dest='degrees', action='store_true', default=0, help='Output orientation in degrees instead of radians')
# parser.add_argument('-ia', '--invert-axes', dest='invertAxes', type=bool, default=0, help='Invert axes on distortion plots')
# parser.add_argument('-l', '--log', dest='logFile', type=str, help='Write a log file with results', metavar='LOGFILENAME')
parser.add_argument('-pd', '--predefined', dest='predefined', action='store_true', default=0, help='Use a predefined sets of parameters for movement simulation')
# parser.add_argument('-si', '--show-images', dest='showImages', action='store_true', default=0, help='Show images')
# parser.add_argument('-s', '--save-images', dest='saveImages', action='store_true', default=0, help='Save images')
# parser.add_argument('-sp', '--save-path', dest='savePath', type=str, default='/tmp', default=0, help='Defines path for generated images, implies save images option', metavar='<SAVEPATH><!/tmp>')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=0, help='Produce more verbose output')
args = parser.parse_args() # Parse script call arguments
#%% Assign arguments
n_L = float(args.n_L)
n_R = float(args.n_R)
P_i_x = float(args.P_i_x)
P_i_y = float(args.P_i_y)
t = float(args.t)
dt = float(args.dt)
Theta_i = float(args.Theta_i)
r_L = float(args.r_L)
r_R = float(args.r_R)
axle_l = float(args.axle_l)
#%% Script arguments
# logFile = str(args.logFile)
degrees = bool(args.degrees)
# colorPlot = bool(args.colorPlot)
generatePlot = bool(args.generatePlot)
predefined = bool(args.predefined)
# saveImages = bool(args.saveImages)
# savePath = str(args.savePath)
# showImages = bool(args.showImages)
verbose = bool(args.verbose)
#%% Function definition
def rollo_compute_position(P_i_x, P_i_y, Theta_i, n_L, n_R, t, r_L, r_R, axle_l, verbose, degrees):
"Calculates the final position accodring to the odometry model"
if degrees:
Theta_i = Theta_i / 180.0 * np.pi
S_L = t * (n_L / 60.0) * 2 * np.pi * r_L # Linear distance traveled by left wheel in meters
S_R = t * (n_R / 60.0) * 2 * np.pi * r_R # Linear distance traveled by right wheel in meters
Beta = (S_L - S_R) / 2.0 # Travel angle
r = (S_L + S_R) / 2.0 # Travel radius
P_f_x = P_i_x + r * np.cos(Theta_i - (Beta / 2.0))
P_f_y = P_i_y + r * np.sin(Theta_i - (Beta / 2.0))
Theta_f = Theta_i - Beta
if degrees:
Theta_f = Theta_f * 180 / np.pi
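# NOTE: t_s, dt and t_f are module-level globals assigned in the main
# script below, so this branch is intended to print only on the final
# simulation step; when verbose is falsy it is skipped entirely.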
if verbose and t_s + dt == t_f:
print('Linear distance traveled by left wheel (S_L [m]): ', S_L)
print('Linear distance traveled by right wheel (S_R [m]): ', S_R)
print('Travel radius [m]: ', r)
print('Travel angle [', ThetaFormat, ']:', Beta)
return [P_f_x, P_f_y, Theta_f]
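# Example call (values are chosen only for illustration):
#
#     [x_f, y_f, th_f] = rollo_compute_position(0.0, 0.0, 0.0, n_L=10, n_R=10,
#                                                t=60, r_L=0.1, r_R=0.1,
#                                                axle_l=0.205, verbose=0,
#                                                degrees=0)
#
# With equal wheel speeds Beta = 0, so the robot drives straight for
# 60 * (10 / 60.0) * 2 * pi * 0.1 ~= 6.28 m along its initial heading.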
#%% Main script
if __name__ == '__main__':
if degrees:
# ThetaFormat = '°'
ThetaFormat = 'deg'
else:
ThetaFormat = 'rad'
if predefined:
P_i_x = -2; P_i_y = -2;
if degrees:
Theta_i = 45
else:
Theta_i = np.pi / 4.0
n_L = 6; n_R = 5; t = 1000;
r_L = 0.1; r_R = 0.1; axle_l = 0.205;
print('Using predefined variables set')
if verbose:
print('Time (t [s]): ', t)
print('Initial position (X [m]; Y [m]): ', P_i_x, ';', P_i_y)
print('Initial orientation (Theta [', ThetaFormat, ']): ', Theta_i)
print('Axle length (axle_l [m]): ', axle_l)
print('Wheel rotation speed - left (n_L [1/min]): ', n_L)
print('Wheel rotation speed - right (n_R [1/min]): ', n_R)
print('Wheel radius - left (r_L [m]): ', r_L)
print('Wheel radius - right (r_R [m]): ', r_R)
if generatePlot:
axi = 8.0
axd = 1
markerScale = 20
lineWidth = 1.6
###TODO use array instead of scalar values
Theta_i_t = Theta_i
P_i_x_t = P_i_x
P_i_y_t = P_i_y
if dt < 0:
steps = - float(dt)
dt = t / steps
else:
steps = np.ceil(t / float(dt))
t_s = 0
t_f = t
if verbose:
print('Time step (dt [s]): ', dt)
print('Number of steps (n [1]): ', steps)
# for i in np.arange(0, t, t / step):
for i in np.arange(1, steps + 1, 1):
# [P_f_x_t, P_f_y_t, Theta_f_t] = rollo_compute_position(P_i_x_t, P_i_y_t, Theta_i_t, n_L, n_R, dt, r_L, r_R, axle_l, 0, degrees)
[P_f_x_t, P_f_y_t, Theta_f_t] = rollo_compute_position(P_i_x_t, P_i_y_t, Theta_i_t, n_L, n_R, dt, r_L, r_R, axle_l, verbose, degrees)
print('Loop:', int(i), 'Calculated position (x [m], y [m], Theta [', ThetaFormat, ']):', P_f_x_t, P_f_y_t, Theta_f_t)
# print(int(i))
###TODO calculate lowest and highest points for axes
if generatePlot:
if not degrees:
of = np.rad2deg(Theta_i_t) - 90
else:
of = Theta_i_t - 90
plt.plot(P_f_x_t, P_f_y_t, 'b', marker=(3, 0, of), markersize = markerScale / 3)
Theta_i_t = Theta_f_t
P_i_x_t = P_f_x_t
P_i_y_t = P_f_y_t
t_s = t_s + dt
P_f_x = P_f_x_t
P_f_y = P_f_y_t
Theta_f = Theta_f_t
if verbose:
print('Simulation duration (t_s [s]): ', t_s)
# [P_f_x, P_f_y, Theta_f] = rollo_compute_position(P_i_x, P_i_y, Theta_i, n_L, n_R, t, r_L, r_R, axle_l, verbose, degrees)
print('Final position (x [m], y [m]): ', P_f_x, ',', P_f_y)
if degrees:
# print('Final orientation (Theta [°]): ', Theta_f % 360)
print('Final orientation (Theta [deg]): ', Theta_f % 360)
else:
print('Final orientation (Theta [rad]): ', Theta_f % (2 * np.pi))
if generatePlot:
###TODO take the highest or lowest values from simulation and add a margin to the plot
axx = np.max([np.ceil(np.abs(P_f_x - P_i_x) / axi) * axi, np.abs(P_f_x) + axd, np.abs(P_i_x) + axd])
axy = np.max([np.ceil(np.abs(P_f_y - P_i_y) / axi) * axi, np.abs(P_f_y) + axd, np.abs(P_i_y) + axd])
# plt.axis([-axx, axx, -axy, axy])
plt.axis('equal')
if not degrees:
oi = np.rad2deg(Theta_i) - 90
of = np.rad2deg(Theta_f) - 90
else:
oi = Theta_i - 90
of = Theta_f - 90
plt.plot(P_i_x, P_i_y, 'g', marker = (3, 0, oi), markersize = markerScale, linewidth = lineWidth)
# plt.plot(P_f_x_t, P_f_y_t, 'r', marker = (3, 0, of), markersize = markerScale, linewidth = lineWidth)
plt.plot(P_f_x, P_f_y, 'r', marker = (3, 0, of), markersize = markerScale, linewidth = lineWidth)
plt.grid(1)
figure = plt.gcf()
figure.canvas.set_window_title('Rollo - Odometry model')
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.tight_layout()
plt.show()
# print(P_f_x_t, P_f_y_t, Theta_f_t, z)
# print(t, rollo_compute_position(P_i_x, P_i_y, Theta_i, n_L, n_R, t, r_L, r_R, axle_l, 0, degrees))
# print(t_s, rollo_compute_position(P_i_x, P_i_y, Theta_i, n_L, n_R, z-dt, r_L, r_R, axle_l, 0, degrees))
| gpl-2.0 |
mwaskom/lyman | conftest.py | 1 | 14325 | import os
from copy import deepcopy
import numpy as np
import pandas as pd
import nibabel as nib
import pytest
from lyman.frontend import LymanInfo
@pytest.fixture()
def execdir(tmpdir):
origdir = tmpdir.chdir()
yield tmpdir
origdir.chdir()
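# Hypothetical consumer (sketch, not part of this suite): any test that
# requests ``execdir`` runs inside a fresh temporary working directory, e.g.
#
#     def test_writes_into_tmpdir(execdir):
#         out = execdir.join("result.txt")
#         out.write("ok")
#         assert out.check()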
@pytest.fixture()
def lyman_info(tmpdir):
data_dir = tmpdir.mkdir("data")
proc_dir = tmpdir.mkdir("proc")
cache_dir = tmpdir.mkdir("cache")
os.environ["SUBJECTS_DIR"] = str(data_dir)
# TODO probably get these from default info functions
scan_info = {
"subj01": {
"sess01":
{"exp_alpha": ["run01", "run02"]},
"sess02":
{"exp_alpha": ["run01"],
"exp_beta": ["run01", "run02", "run03"]},
},
"subj02": {
"sess01":
{"exp_alpha": ["run01", "run02", "run03"]}
},
}
contrasts = [
("a", ["a", "b"], [1, 0]),
("b", ["b"], [1]),
("a-b", ["a", "b"], [1, -1])
]
info = LymanInfo().trait_set(
data_dir=str(data_dir),
proc_dir=str(proc_dir),
cache_dir=str(cache_dir),
scan_info=scan_info,
phase_encoding="ap",
fm_template="{session}_{encoding}.nii.gz",
ts_template="{session}_{experiment}_{run}.nii.gz",
sb_template="{session}_{experiment}_{run}_sbref.nii.gz",
experiment_name="exp_alpha",
crop_frames=2,
tr=1.5,
model_name="model_a",
smooth_fwhm=4,
nuisance_components=dict(wm=2, csf=2, edge=2, noise=2),
surface_smoothing=True,
interpolate_noise=True,
hpf_cutoff=10,
percent_change=True,
hrf_derivative=False,
save_residuals=True,
contrasts=contrasts,
)
subjects = ["subj01", "subj02"]
sessions = None
design = pd.DataFrame(dict(
onset=[0, 6, 12, 18, 24],
condition=["a", "b", "c", "b", "a"],
session="sess01",
run="run01",
))
for subject in subjects:
subject_dir = data_dir.mkdir(subject)
subject_dir.mkdir("mri")
subject_dir.mkdir("surf")
subject_dir.mkdir("label")
subject_dir.mkdir("func")
design_dir = subject_dir.mkdir("design")
design.to_csv(design_dir.join(
"{experiment_name}-{model_name}.csv".format(**info.trait_get())
))
vol_shape = 12, 8, 4
n_tp = 20
n_conditions = len(design["condition"].unique())
n_regressors = sum(info.nuisance_components.values())
n_params = n_conditions + n_regressors
return dict(
info=info,
subjects=subjects,
sessions=sessions,
proc_dir=proc_dir,
data_dir=data_dir,
vol_shape=vol_shape,
n_tp=n_tp,
n_params=n_params,
design=design,
)
@pytest.fixture()
def freesurfer(lyman_info):
subject = "subj01"
mri_dir = lyman_info["data_dir"].join(subject).join("mri")
label_dir = lyman_info["data_dir"].join(subject).join("label")
seed = sum(map(ord, "freesurfer"))
rs = np.random.RandomState(seed)
affine = np.eye(4)
vol_shape = lyman_info["vol_shape"]
mask = rs.choice([0, 1], vol_shape, p=[.2, .8])
norm_data = rs.randint(0, 110, vol_shape) * mask
norm_file = str(mri_dir.join("norm.mgz"))
nib.save(nib.MGHImage(norm_data.astype("uint8"), affine), norm_file)
orig_file = str(mri_dir.join("orig.mgz"))
nib.save(nib.MGHImage(norm_data.astype("uint8"), affine), orig_file)
wmparc_vals = [1000, 10, 11, 16, 8, 3000, 5001, 7, 46, 4]
wmparc_data = rs.choice(wmparc_vals, vol_shape) * mask
wmparc_file = str(mri_dir.join("wmparc.mgz"))
nib.save(nib.MGHImage(wmparc_data.astype("int16"), affine), wmparc_file)
n = 10
fmt = ["%d", "%.3f", "%.3f", "%.3f", "%.9f"]
label_data = np.c_[np.arange(n), np.zeros((n, 4))]
label_files = {}
for hemi in ["lh", "rh"]:
fname = str(label_dir.join("{}.cortex.label".format(hemi)))
label_files[hemi] = fname
np.savetxt(fname, label_data, fmt=fmt, header=str(n))
lyman_info.update(
subject=subject,
norm_file=norm_file,
orig_file=orig_file,
wmparc_file=wmparc_file,
label_files=label_files,
)
return lyman_info
@pytest.fixture()
def template(freesurfer):
subject = "subj01"
template_dir = (freesurfer["proc_dir"]
.mkdir(subject)
.mkdir("template"))
seed = sum(map(ord, "template"))
rs = np.random.RandomState(seed)
vol_shape = freesurfer["vol_shape"]
affine = np.array([[-2, 0, 0, 10],
[0, -2, -1, 10],
[0, 1, 2, 5],
[0, 0, 0, 1]])
reg_file = str(template_dir.join("anat2func.mat"))
np.savetxt(reg_file, np.random.randn(4, 4))
lut = pd.DataFrame([
["Unknown", 0, 0, 0, 0],
["Cortical-gray-matter", 59, 95, 138, 255],
["Subcortical-gray-matter", 91, 129, 129, 255],
["Brain-stem", 126, 163, 209, 255],
["Cerebellar-gray-matter", 168, 197, 233, 255],
["Superficial-white-matter", 206, 129, 134, 255],
["Deep-white-matter", 184, 103, 109, 255],
["Cerebellar-white-matter", 155, 78, 73, 255],
["CSF", 251, 221, 122, 255]
])
lut_file = str(template_dir.join("seg.lut"))
lut.to_csv(lut_file, sep="\t", header=False, index=True)
seg_data = rs.randint(0, 9, vol_shape)
seg_file = str(template_dir.join("seg.nii.gz"))
nib.save(nib.Nifti1Image(seg_data, affine), seg_file)
anat_data = rs.randint(0, 100, vol_shape)
anat_file = str(template_dir.join("anat.nii.gz"))
nib.save(nib.Nifti1Image(anat_data, affine), anat_file)
mask_data = (seg_data > 0).astype(np.uint8)
mask_file = str(template_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data, affine), mask_file)
edge_data = (anat_data > 60).astype(np.uint8)
edge_file = str(template_dir.join("edge.nii.gz"))
nib.save(nib.Nifti1Image(edge_data, affine), edge_file)
n_verts = (seg_data == 1).sum()
surf_ids = np.arange(n_verts)
surf_data = np.full(vol_shape + (2,), -1, np.int)
surf_data[seg_data == 1, 0] = surf_ids
surf_data[seg_data == 1, 1] = surf_ids
surf_file = str(template_dir.join("surf.nii.gz"))
nib.save(nib.Nifti1Image(surf_data, affine), surf_file)
ribbon_data = (surf_data > -1).any(axis=-1).astype(np.int8)
ribbon_file = str(template_dir.join("ribbon.nii.gz"))
nib.save(nib.Nifti1Image(ribbon_data, affine), ribbon_file)
mesh_name = "graymid"
verts = rs.uniform(-1, 1, (n_verts, 3))
faces = np.array([(i, i + 1, i + 2) for i in range(n_verts - 2)])
surf_dir = freesurfer["data_dir"].join(subject).join("surf")
mesh_files = (str(surf_dir.join("lh." + mesh_name)),
str(surf_dir.join("rh." + mesh_name)))
for fname in mesh_files:
nib.freesurfer.write_geometry(fname, verts, faces)
freesurfer.update(
vol_shape=vol_shape,
subject=subject,
lut_file=lut_file,
seg_file=seg_file,
reg_file=reg_file,
anat_file=anat_file,
edge_file=edge_file,
mask_file=mask_file,
surf_file=surf_file,
ribbon_file=ribbon_file,
mesh_name=mesh_name,
mesh_files=mesh_files,
)
return freesurfer
@pytest.fixture()
def timeseries(template):
seed = sum(map(ord, "timeseries"))
rs = np.random.RandomState(seed)
session = "sess01"
run = "run01"
exp_name = template["info"].experiment_name
model_name = template["info"].model_name
vol_shape = template["vol_shape"]
n_tp = template["n_tp"]
affine = np.eye(4)
affine[:3, :3] *= 2
timeseries_dir = (template["proc_dir"]
.join(template["subject"])
.mkdir(exp_name)
.mkdir("timeseries")
.mkdir("{}_{}".format(session, run)))
model_dir = (template["proc_dir"]
.join(template["subject"])
.join(exp_name)
.mkdir(model_name)
.mkdir("{}_{}".format(session, run)))
mask_data = nib.load(template["seg_file"]).get_fdata() > 0
mask_data &= rs.uniform(0, 1, vol_shape) > .05
mask_file = str(timeseries_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data.astype(np.int), affine), mask_file)
noise_data = mask_data & rs.choice([False, True], vol_shape, p=[.95, .05])
noise_file = str(timeseries_dir.join("noise.nii.gz"))
nib.save(nib.Nifti1Image(noise_data.astype(np.int), affine), noise_file)
ts_shape = vol_shape + (n_tp,)
ts_data = rs.normal(100, 5, ts_shape) * mask_data[..., np.newaxis]
ts_file = str(timeseries_dir.join("func.nii.gz"))
nib.save(nib.Nifti1Image(ts_data, affine), ts_file)
mc_data = rs.normal(0, 1, (n_tp, 6))
mc_file = str(timeseries_dir.join("mc.csv"))
cols = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]
pd.DataFrame(mc_data, columns=cols).to_csv(mc_file)
template.update(
n_tp=n_tp,
affine=affine,
session=session,
run=run,
mask_file=mask_file,
noise_file=noise_file,
ts_file=ts_file,
mc_file=mc_file,
timeseries_dir=timeseries_dir,
model_dir=model_dir,
)
return template
@pytest.fixture()
def modelfit(timeseries):
seed = sum(map(ord, "modelfit"))
rs = np.random.RandomState(seed)
vol_shape = timeseries["vol_shape"]
affine = timeseries["affine"]
n_params = timeseries["n_params"]
n_tp = timeseries["n_tp"]
n_vox = np.product(vol_shape)
model_dir = timeseries["model_dir"]
seg_data = nib.load(timeseries["seg_file"]).get_fdata()
mask_data = nib.load(timeseries["mask_file"]).get_fdata()
mask_data = ((seg_data == 1) & (mask_data == 1)).astype(np.int)
mask_file = str(model_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data, affine), mask_file)
beta_data = rs.normal(0, 1, vol_shape + (n_params,))
beta_file = str(model_dir.join("beta.nii.gz"))
nib.save(nib.Nifti1Image(beta_data, affine), beta_file)
ols_data = np.empty((n_vox, n_params, n_params))
for i in range(n_vox):
X = rs.normal(0, 1, (n_tp, n_params))
ols_data[i] = np.linalg.pinv(np.dot(X.T, X))
ols_data = ols_data.reshape(vol_shape + (n_params ** 2,))
ols_file = str(model_dir.join("ols.nii.gz"))
nib.save(nib.Nifti1Image(ols_data, affine), ols_file)
error_data = rs.uniform(0, 5, vol_shape)
error_file = str(model_dir.join("error.nii.gz"))
nib.save(nib.Nifti1Image(error_data, affine), error_file)
design_data = rs.normal(0, 1, (n_tp, n_params))
columns = list(np.sort(timeseries["design"]["condition"].unique()))
for source, comp in timeseries["info"].nuisance_components.items():
columns.extend([f"{source}{i+1}" for i in range(comp)])
model_file = str(model_dir.join("model.csv"))
pd.DataFrame(design_data, columns=columns).to_csv(model_file, index=False)
timeseries.update(
n_params=n_params,
mask_file=mask_file,
beta_file=beta_file,
ols_file=ols_file,
error_file=error_file,
model_file=model_file,
)
return timeseries
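# Hypothetical consumer (sketch, not part of this suite): a test that needs
# the fitted-model files can simply request the fixture, e.g.
#
#     def test_beta_shape(modelfit):
#         img = nib.load(modelfit["beta_file"])
#         assert img.shape == modelfit["vol_shape"] + (modelfit["n_params"],)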
@pytest.fixture()
def modelres(modelfit):
seed = sum(map(ord, "modelres"))
rs = np.random.RandomState(seed)
vol_shape = modelfit["vol_shape"]
affine = modelfit["affine"]
name_lists = [
["a", "b", "c", "a-b"],
["a", "b", "a-b"],
]
run_ns = [len(n_list) for n_list in name_lists]
info = deepcopy(modelfit["info"])
info.contrasts.insert(2, ("c", ["c"], [1]))
exp_name = modelfit["info"].experiment_name
model_name = modelfit["info"].model_name
session = "s1"
runs = ["r1", "r2"]
model_dir_base = (modelfit["proc_dir"]
.join(modelfit["subject"])
.join(exp_name)
.join(model_name))
model_dirs = [
model_dir_base.mkdir("{}_{}".format(session, run))
for run in runs
]
con_data = [rs.normal(0, 5, vol_shape + (n,)) for n in run_ns]
con_files = [str(d.join("contrast.nii.gz")) for d in model_dirs]
for d, f in zip(con_data, con_files):
nib.save(nib.Nifti1Image(d, affine), f)
var_data = [rs.uniform(0, 5, vol_shape + (n,)) for n in run_ns]
var_files = [str(d.join("variance.nii.gz")) for d in model_dirs]
for d, f in zip(var_data, var_files):
nib.save(nib.Nifti1Image(d, affine), f)
name_files = [str(d.join("contrast.txt")) for d in model_dirs]
for l, f in zip(name_lists, name_files):
np.savetxt(f, l, "%s")
modelfit.update(
info=info,
contrast_files=con_files,
variance_files=var_files,
name_files=name_files,
)
return modelfit
@pytest.fixture
def meshdata(execdir):
verts = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 1],
[2, 0, 0],
[2, 2, 2]], np.float)
faces = np.array([[0, 1, 2],
[0, 2, 3],
[2, 3, 4]], np.int)
sqrt2 = np.sqrt(2)
sqrt3 = np.sqrt(3)
sqrt8 = np.sqrt(8)
neighbors = {0: {1: 1.0, 2: sqrt3, 3: 2.0},
1: {0: 1.0, 2: sqrt2},
2: {0: sqrt3, 1: sqrt2, 3: sqrt3, 4: sqrt3},
3: {0: 2.0, 2: sqrt3, 4: sqrt8},
4: {2: sqrt3, 3: sqrt8}}
subj = "subj01"
surf = "white"
surf_dir = execdir.mkdir(subj).mkdir("surf")
for hemi in ["lh", "rh"]:
fname = str(surf_dir.join("{}.{}".format(hemi, surf)))
nib.freesurfer.write_geometry(fname, verts, faces)
meshdata = dict(
verts=verts,
faces=faces,
neighbors=neighbors,
fname=fname,
subj=subj,
surf=surf,
hemi=hemi,
)
orig_subjects_dir = os.environ.get("SUBJECTS_DIR", None)
os.environ["SUBJECTS_DIR"] = str(execdir)
yield meshdata
if orig_subjects_dir is None:
del os.environ["SUBJECTS_DIR"]
else:
os.environ["SUBJECTS_DIR"] = orig_subjects_dir
| bsd-3-clause |
Git3251/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version: {1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
OshynSong/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
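# Illustrative sketch of calling fit_grid_point directly on a single
# train/test split (the data, split and parameter values below are made up;
# normally GridSearchCV drives this function for you):
#
#     import numpy as np
#     from sklearn.svm import SVC
#     from sklearn.metrics.scorer import check_scoring
#
#     X = np.random.rand(20, 3)
#     y = np.random.randint(0, 2, 20)
#     train, test = np.arange(15), np.arange(15, 20)
#     clf = SVC()
#     score, params, n_test = fit_grid_point(
#         X, y, clf, {'C': 1.0}, train, test,
#         scorer=check_scoring(clf), verbose=0)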
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the per-instance __dict__; in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. We avoid that by
# telling the Python interpreter that this subclass uses static __slots__
# instead of dynamic attributes. Furthermore we don't need any additional
# slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
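# Illustrative sketch (not part of the module): randomized search over a
# mixed discrete/continuous parameter space. The estimator and distributions
# below are arbitrary choices for demonstration.
#
#     from scipy.stats import expon
#     from sklearn.svm import SVC
#     from sklearn.datasets import load_iris
#
#     iris = load_iris()
#     param_dist = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
#     search = RandomizedSearchCV(SVC(), param_dist, n_iter=8, random_state=0)
#     search.fit(iris.data, iris.target)
#     print(search.best_params_, search.best_score_)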
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/hyperopt-0.0.2/hyperopt/tests/test_plotting.py | 3 | 1760 | """
Verify that the plotting routines can at least run.
If environment variable HYPEROPT_SHOW is defined and true,
then the plots actually appear.
"""
import unittest
import os
try:
import matplotlib
matplotlib.use('svg') # -- prevents trying to connect to X server
except ImportError:
import nose
raise nose.SkipTest()
from hyperopt import Experiment, Trials, TreeParzenEstimator
from .test_tpe import many_dists
import hyperopt.bandits
import hyperopt.plotting
def get_do_show():
rval = int(os.getenv('HYPEROPT_SHOW', '0'))
print 'do_show =', rval
return rval
class TestPlotting(unittest.TestCase):
def setUp(self):
bandit = self.bandit = many_dists()
algo = TreeParzenEstimator(bandit)
trials = Trials()
experiment = Experiment(trials, algo, async=False)
experiment.max_queue_len = 1
N=200
if 0:
import cProfile
stats = cProfile.runctx('experiment.run(N)', globals={},
locals=locals(), filename='fooprof')
import pstats
p = pstats.Stats('fooprof')
p.sort_stats('cumulative').print_stats(10)
p.sort_stats('time').print_stats(10)
else:
experiment.run(N)
self.trials = trials
def test_plot_history(self):
hyperopt.plotting.main_plot_history(
self.trials,
do_show=get_do_show())
def test_plot_histogram(self):
hyperopt.plotting.main_plot_histogram(
self.trials,
do_show=get_do_show())
def test_plot_vars(self):
hyperopt.plotting.main_plot_vars(
self.trials,
self.bandit,
do_show=get_do_show())
| bsd-3-clause |
zyndagj/reptiming_logFC | flankingRegions.py | 2 | 6599 | #!/usr/bin/env python
import numpy as np
from collections import Counter
import argparse, os, re, sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
from itertools import izip_longest
from operator import itemgetter
def main():
parser = argparse.ArgumentParser(description="Investigates the flanking regiosn around EL and EML.")
parser.add_argument("GFF3",metavar="GFF3", help="Segmentation Profile (mitotic)", nargs=1)
parser.add_argument("-T",metavar="STR", help="Times (Default: %(default)s)", default="ES,MS,LS", type=str)
args = parser.parse_args()
plotVars(args.T)
firstLast, allTimes = processGenome(args.GFF3[0])
plt.figure(figsize=(4,8))
ax = plt.subplot(211)
ax.xaxis.set_visible(False)
firstLastFreqs = countFreqs(firstLast)
writeFreqs(firstLastFreqs, 'EL_links.tab')
plotFreqs(firstLastFreqs, 'Frequency of EL Links')
ax = plt.subplot(212)
ax.xaxis.set_visible(False)
allFreqs = countFreqs(allTimes)
writeFreqs(allFreqs, 'EML_links.tab')
plotFreqs(allFreqs, 'Frequency of EML Links')
plt.tight_layout()
plt.savefig('relationships.png')
def plotFreqs(counter, name):
labels, inds, cinds = makeLabels()
colDict = {labels[i]:colors[cinds[i]] for i in range(len(labels))}
for k,v in counter.iteritems():
plt.plot([0,1], [v,v], 'k-')
plt.scatter([0],[v], s=60, c=colDict[k[0]])
plt.scatter([1],[v], s=60, c=colDict[k[2]])
plt.text(1.2, v, '-'.join(k).translate(None,'S'), fontsize=9)
plt.xlim((-0.25,1.8))
plt.title(name+' (total=%i)'%(sum(counter.values())), y=1.05)
def countFreqs(inList):
c = Counter()
c.update(inList)
return c
def writeFreqs(counter, outFile):
OF = open(outFile,'w')
for k,i in counter.iteritems():
OF.write('%s\t%i\n'%('\t'.join(k),i))
OF.close()
def processGenome(gff):
flName = nameList[0]+nameList[-1]
allName = ''.join(nameList)
firstLast = []
allTimes = []
for f, s, t in fileReader(gff):
if s[1] == flName:
if checkDist(f,s,t):
firstLast.append((f[1], s[1], t[1]))
elif s[1] == allName:
if checkDist(f,s,t):
allTimes.append((f[1], s[1], t[1]))
return (firstLast, allTimes)
def checkDist(f,s,t):
ntf = f[0][2] == s[0][1]
ntt = s[0][2] == t[0][1]
return ntf and ntt
def toBA(name):
'''
>>> toBA('ESMS')
array([ True, True, False], dtype=bool)
>>> toBA('MSLS')
array([False, True, True], dtype=bool)
'''
return np.array([N in name for N in nameList], dtype=np.bool)
def plotVars(names):
global nameList
nameList = names.split(',')
global colors
if names == 'ES,MS,LS':
#global times
#times = ('ES','ESMS','MS','MSLS','LS','ESLS','ESMSLS')
#myColors = ("#2250F1","#28C5CC","#1A8A12","#FFFD33","#FB0018","#EA3CF2","#FAB427")
colors = ["#FB0018","#1A8A12","#FFFD33","#2250F1","#EA3CF2","#28C5CC","#FAB427"]
else:
colors = cm.rainbow(np.linspace(0,1,2**len(nameList)-1))
def plotComp(segments, title, tileSize, chromDict, figExt):
plt.figure()
yIndex = 0.1
yHeight = 0.8
sortedChroms = sorted(chromDict.keys())
labels, inds, cinds = makeLabels()
OT = open("composition_%s.tab"%(title), 'w')
OT.write("Chr\t"+'\t \t'.join(labels)+'\t \tChr Length\n')
for chrom in sortedChroms:
otStr = '%s\t'%(chrom)
chromSize = chromDict[chrom]
X = np.zeros(2**len(nameList)-1)
for arrayStr, size in segments[chrom]:
sortedInd = inds[int(arrayStr,2)-1]
X[sortedInd] += size*tileSize
percents = list(np.round(X/float(chromSize),3))
sP = map(lambda x: str(x*100)+'%', percents)
otStr += '\t'.join([str(val) for tup in zip(X,sP) for val in tup])+'\t'+str(chromSize)+'\n'
OT.write(otStr)
xranges = zip(np.cumsum([0]+percents[:-1]), percents)
plt.broken_barh(xranges, (yIndex, yHeight), lw=0, color=[colors[i] for i in cinds])
yIndex += 1
OT.close()
plt.xlim((0,1))
plt.yticks(np.arange(0.5, len(sortedChroms)), sortedChroms)
plt.ylabel("Chromosome")
plt.xlabel("Fraction of Chromosome")
plt.title(title+" Chromosome Composition")
patches = [mpatches.Patch(color=colors[cinds[i]], label=labels[i]) for i in xrange(len(labels))]
plt.figlegend(patches, labels, loc='center right', ncol=1, frameon=False)
plt.tight_layout(rect=[0,0,0.81,1.0])
plt.savefig("composition_%s.%s"%(title, figExt))
plt.close()
def makeLabels():
'''
>>> makeLabels()
['ES', 'ESMS', 'ESLS', 'ESMSLS', 'MS', 'MSLS', 'LS']
'''
labels = []
numNames = 2**len(nameList)-1
for i in range(numNames):
binRep = map(int, np.binary_repr(i+1,len(nameList)))
name = ''
for binI in range(len(binRep)):
if binRep[binI]: name += nameList[binI]
boolA = np.array(binRep, dtype=np.bool)
val = np.mean(np.where(boolA))
labels.append((name,val))
sortedLabels = sorted(labels, key=itemgetter(1,0))
inds = [sortedLabels.index(x) for x in labels]
cinds = [labels.index(x) for x in sortedLabels]
return (map(lambda x: x[0], sortedLabels), inds, cinds)
def plotSize(segments, title, tileSize, figExt):
X = [[] for i in xrange(2**len(nameList)-1)]
labels, inds, cinds = makeLabels()
for chromList in segments.itervalues():
for arrayStr, size in chromList:
base10 = int(arrayStr,2)
sortedInd = inds[base10-1]
X[sortedInd].append(size*tileSize)
print "%s Size Distribution"%(title)
print "%-6s %10s %10s %10s %10s %10s %10s"%("","min","1st-Q","median","3rd-Q","max",'count')
for segment, xIndex in zip(labels, range(len(labels))):
fiveSum = fivenum(X[xIndex]) # (min, 1st-Q, median, 3rd-Q, max)
args = (segment,)+fiveSum+(len(X[xIndex]),)
print "%-6s %10.1f %10.1f %10.1f %10.1f %10.1f %10i"%args
plt.figure()
plt.boxplot(X, labels=labels, showfliers=False)
plt.ylabel("Segment Size (bp)")
plt.xlabel("Time")
plt.title(title+" Size Distribution")
plt.savefig("size_dist_%s.%s"%(title, figExt))
plt.close()
def fileReader(a):
if not os.path.splitext(a)[1] == '.gff3':
sys.exit("%s is not a gff3 file"%(a))
IF = open(a,'r')
# Skip header
tmpLine = IF.readline()
while tmpLine[0] == '#': tmpLine = IF.readline()
sLine = lineParser(tmpLine) #((chrom, start, end), name)
tLine = lineParser(IF.readline())
for line in IF:
if line[0] != '#':
fLine = sLine
sLine = tLine
tLine = lineParser(line)
yield((fLine, sLine, tLine))
# pre-compiled RE for finding the name in the GFF
nameRE = re.compile(r'Name=([^;]+);')
def lineParser(line):
tmp = line.split('\t')
try:
location = (tmp[0], int(tmp[3])-1, int(tmp[4])) # (chrom, start, end)
except:
print "Couldn't parse:", tmp
sys.exit()
name = nameRE.search(tmp[8]).group(1) # name
return location, name
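# Illustrative sketch: for a (made-up) tab-separated GFF3 line such as
#
#     Chr1  .  segment  101  200  .  .  .  ID=foo;Name=ESMS;
#
# lineParser returns (('Chr1', 100, 200), 'ESMS') -- a zero-based, half-open
# interval plus the Name attribute extracted by nameRE.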
if __name__ == "__main__":
main()
else:
nameList = ['ES','MS','LS']
| bsd-2-clause |
kayarre/Tools | mpi_ascii.py | 1 | 6255 | #mpi_ascii.py
from mpi4py import MPI
import numpy as np
import pandas as pd
import os
import h5py
def write_fields(dbFile, time_idx, time_pt,
velocity, compression=None, add_fields=None):
timeIdxGroup = dbFile.create_group("field_{0:d}".format(time_idx))
timeIdxGroup.attrs["time"] = np.float32(time_pt)
timeIdxGroup.create_dataset("velocity", data=velocity,
dtype=np.float32, compression=compression)
if add_fields is not None:
for fld in add_fields.keys():
timeIdxGroup.create_dataset(fld, data=add_fields[fld],
dtype=np.float32, compression=compression)
def initialize_write(dbFile, time_idx, time_pt,
coords, velocity, compression=None, nodes=None,
add_fields=None):
"""Write the velocity field into an HDF5 file.
Will also write the corresponding time value.
Parameters
---------
hdf5File : h5py.File
The HDF5 file.
time_pt : float
The value of time associated with the written
velocity field.
coords : list of 1d ndarray
A list 1d ndarray containing the points.
velocity : list of 1d ndarray
A list of ndarrays to write to velocity field.
"""
pointGroup = dbFile.create_group("coordinates")
pointGroup.attrs["nPoints"] = np.int64(coords.shape[0])
pointGroup.attrs["dimension"] = np.int32(coords.shape[1])
pointGroup.create_dataset("coordinates", data=coords,
dtype=np.float32, compression=compression)
if nodes is not None:
pointGroup.create_dataset("nodes", data=nodes,
dtype=np.int32, compression=compression)
write_fields(dbFile, time_idx, time_pt,
velocity, add_fields=None, compression=compression)
def write_time(dbFile, time, time_idx, compression=None):
timeGroup = dbFile.create_group("time")
timeGroup.attrs["tPoints"] = np.int32(time.shape[0])
timeGroup.create_dataset("time", data=time,
dtype=np.float32, compression=compression)
timeGroup.create_dataset("time_index", data=time_idx,
dtype=np.int32, compression=compression)
def write_binary(dbFile, sol_files, stop_n, compression=None):
time = []
var_dict = {}
for file_idx, file_ in enumerate(sol_files):
if (file_idx >= stop_n):
break
if os.path.isfile(file_):
print(os.path.split(file_)[-1])
split_name = file_.split('-')
time_pt = float(split_name[-1])
print(time_pt)
time.append(time_pt)
data = pd.read_csv(file_, sep='\s+', usecols=np.arange(1,7)).values
if(file_idx == 0):
coords = data[:,0:3]
v = data[:,3:6]
initialize_write(dbFile, file_idx, split_name[-1], coords, v, compression)
else:
v = data[:,3:6]
write_fields(dbFile, file_idx, split_name[-1], v, compression)
time = np.asarray(time)
length_t = time.shape[0]
index_t = np.arange(length_t)
write_time(dbFile, time, index_t, compression)
def run_script():
rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()
name = MPI.Get_processor_name()
# ******************************
# actual (serial) work goes here
# ******************************
#python ascii_2_binary.py --dir_path=/raid/home/ksansom/caseFiles/ultrasound/cases/DSI006DA/fluent_4PL --search_name=DSI006DA_4PL_ascii-* --output_name=DSI006DA_4PL --n_steps=2
dir_path = "/raid/home/ksansom/caseFiles/ultrasound/cases"
sub_dir = "fluent"
file_list_name = "file_list"
out_path_dir = "post_proc"
case_dict = {"DSI002CARc" : [1, 635],
"DSI003LERd" : [2, 836],
"DSI006DA" : [3, 849],
"DSI007LERb" : [4, 644],
"DSI009CALb" : [5, 691],
"DSI010CALb" : [6, 1030],
"DSI010LERd" : [7, 908],
"DSI011CARe" : [8, 1769],
"DSI015DALd" : [9, 1930]
}
#print("Hello, world! This is rank {0:d} of {1:d} running on {2:s}".format(rank, size, name))
case_id = case_dict.keys()
length_cases = len(case_dict.keys())
if(rank == 0):
print(length_cases)
#length_cases = 2
if(length_cases > size):
print("Not enough processors to complete")
MPI.COMM_WORLD.Abort()
else:
for idx, case in enumerate(case_id):
#here is one case execute for that rank
#print("yo", idx, case)
if(rank > length_cases):
continue
elif(rank+1 == idx):
print("run case {0:s} on rank {1:d}".format(case, rank))
ascii_path = os.path.join(dir_path, case, sub_dir)
file_list_path = os.path.join(ascii_path, file_list_name)
out_file_name = "{0:s}.hdf5".format(case)
out_path = os.path.join(ascii_path, out_path_dir)
#out_file_path = os.path.join(out_path, out_file_name)
if not os.path.exists(out_path):
print("creating path directory")
os.makedirs(out_path)
sol_names = pd.read_csv(file_list_path, header=None).values
sol_files = [os.path.join(ascii_path, str(i[0])) for i in sol_names]
#make sure all the files exist
for fname in sol_files:
if(not os.path.isfile(fname)):
print("There is a missing file in case {0:s}".format(case))
print(fname)
continue
out_file_path = os.path.join(out_path, out_file_name)
# Allocate arrays for the fluctuations
if os.path.isfile(out_file_path):
print("HDF5 file already exists. It it will be overwritten.")
try:
os.remove(out_file_path)
except FileNotFoundError:
print("Error trying to delete: {0:s}".format(out_file_path))
dbFile = h5py.File(out_file_path, 'w')
#dbFile = h5py.File(out_file_path, 'a', driver='mpio', comm=MPI.COMM_WORLD)
compression = "lzf"
#compression = None
#write_binary(dbFile, sol_files, case_dict[case][1], compression)
write_binary(dbFile, sol_files, 2, compression)
MPI.COMM_WORLD.Barrier() # wait for everybody to synchronize _here_
if ( __name__ == '__main__' ):
run_script()
| bsd-2-clause |
mgoddard-pivotal/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
yavalvas/yav_com | build/matplotlib/examples/units/basic_units.py | 9 | 11157 | import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
from matplotlib.axes import Axes
from matplotlib.cbook import iterable
class ProxyDelegate(object):
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta (type):
def __init__(cls, name, bases, dict):
for fn_name in cls._proxies.keys():
try:
dummy = getattr(cls, fn_name)
except AttributeError:
setattr(cls, fn_name,
ProxyDelegate(fn_name, cls._proxies[fn_name]))
class PassThroughProxy(object):
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
if (type(ret) == type(NotImplemented)):
return NotImplemented
return TaggedValue(ret, self.unit)
class ConvertAllProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
except:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if (type(ret) == type(NotImplemented)):
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if (ret_unit == NotImplemented):
return NotImplemented
return TaggedValue(ret, ret_unit)
class _TaggedValue(object):
_proxies = {'__add__': ConvertAllProxy,
'__sub__': ConvertAllProxy,
'__mul__': ConvertAllProxy,
'__rmul__': ConvertAllProxy,
'__cmp__': ConvertAllProxy,
'__lt__': ConvertAllProxy,
'__gt__': ConvertAllProxy,
'__len__': PassThroughProxy}
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type('TaggedValue_of_%s' % (value_class.__name__),
tuple([cls, value_class]),
{})
if subcls not in units.registry:
units.registry[subcls] = basicConverter
return object.__new__(subcls, value, unit)
except TypeError:
if cls not in units.registry:
units.registry[cls] = basicConverter
return object.__new__(cls, value, unit)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def __getattribute__(self, name):
if (name.startswith('__')):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if (hasattr(variable, name) and name not in self.__class__.__dict__):
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, t=None, context=None):
if t is not None:
return np.asarray(self.value).astype(t)
else:
return np.asarray(self.value, 'O')
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue(' + repr(self.value) + ', ' + repr(self.unit) + ')'
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __len__(self):
return len(self.value)
def __iter__(self):
class IteratorProxy(object):
def __init__(self, iter, unit):
self.iter = iter
self.unit = unit
def __next__(self):
value = next(self.iter)
return TaggedValue(value, self.unit)
next = __next__ # for Python 2
return IteratorProxy(iter(self.value), self.unit)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
if (unit == self.unit or not unit):
return self
new_value = self.unit.convert_value_to(self.value, unit)
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
TaggedValue = TaggedValueMeta('TaggedValue', (_TaggedValue, ), {})
class BasicUnit(object):
def __init__(self, name, fullname=None):
self.name = name
if fullname is None:
fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return 'BasicUnit(%s)'%self.name
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if (unit == NotImplemented):
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver(object):
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if (unit_1 != unit_2):
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if (len(non_null) > 1):
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if (operation not in self.op_dict):
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n/2,)
else:
return r'$%s\pi/2$' % (n,)
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
'return AxisInfo instance for x and unit'
if unit==radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit==degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
if iterable(val):
return [thisval.convert_to(unit).get_value() for thisval in val]
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos(x):
if iterable(x):
return [math.cos(val.convert_to(radians).get_value()) for val in x]
else:
return math.cos(x.convert_to(radians).get_value())
basicConverter = BasicUnitConverter()
units.registry[BasicUnit] = basicConverter
units.registry[TaggedValue] = basicConverter
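# Illustrative sketch of using the units defined above with matplotlib
# (values are arbitrary; any of the matplotlib examples/units demos work the
# same way):
#
#     import numpy as np
#     from matplotlib import pyplot as plt
#
#     x = cm(np.arange(0, 10, 0.5))          # TaggedValue carrying the 'cm' unit
#     plt.plot(x, x.convert_to(inch), 'o')   # axes are labelled via BasicUnitConverter
#     plt.show()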
| mit |
Frankkkkk/arctic | tests/integration/chunkstore/test_chunkstore.py | 1 | 59477 | from pandas import DataFrame, MultiIndex, Index, Series
from datetime import datetime as dt
from pandas.util.testing import assert_frame_equal, assert_series_equal
from arctic.date import DateRange
from arctic.exceptions import NoDataFoundException
import pandas as pd
import numpy as np
import random
import pytest
import pymongo
import pickle
from arctic.chunkstore.chunkstore import START, SYMBOL
from arctic.chunkstore.passthrough_chunker import PassthroughChunker
def test_write_dataframe(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df, chunk_size='D')
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
def test_upsert_dataframe(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.update('test_df', df, upsert=True)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
def test_write_dataframe_noindex(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
chunkstore_lib.write('test_df', df)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
def test_overwrite_dataframe(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1),
(dt(2016, 1, 4), 1)],
names=['date', 'id'])
)
dg = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
chunkstore_lib.write('test_df', dg)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(dg, read_df)
def test_overwrite_dataframe_noindex(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)]})
df2 = DataFrame(data={'data': [5, 6, 7, 8],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)]})
chunkstore_lib.write('test_df', df)
chunkstore_lib.write('test_df', df2)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df2, read_df)
def test_overwrite_dataframe_monthly(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6]},
index=MultiIndex.from_tuples([(dt(2016, 1, 5), 1),
(dt(2016, 2, 5), 1),
(dt(2016, 3, 5), 1),
(dt(2016, 4, 5), 1),
(dt(2016, 5, 5), 1),
(dt(2016, 6, 5), 1)],
names=['date', 'id'])
)
dg = DataFrame(data={'data': [1, 2, 3, 4, 5, 6]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 3, 3), 1),
(dt(2016, 4, 4), 1),
(dt(2016, 5, 5), 1),
(dt(2016, 6, 6), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df, chunk_size='M')
chunkstore_lib.write('test_df', dg, chunk_size='M')
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(dg, read_df)
def test_write_read_with_daterange(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
dg = DataFrame(data={'data': [1, 2]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
read_df = chunkstore_lib.read('test_df', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 2)))
assert_frame_equal(read_df, dg)
read_with_dr = chunkstore_lib.read('test_df', chunk_range=pd.date_range(dt(2016, 1, 1), dt(2016, 1, 2)))
assert_frame_equal(read_with_dr, dg)
def test_write_read_with_daterange_noindex(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]})
dg = DataFrame(data={'data': [1, 2],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2)]})
chunkstore_lib.write('test_df', df)
read_df = chunkstore_lib.read('test_df', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 2)))
assert_frame_equal(read_df, dg)
def test_store_single_index_df(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 3)))
assert_frame_equal(df, ret)
def test_no_range(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test')
assert_frame_equal(df, ret)
def test_closed_open(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), None))
assert_frame_equal(df, ret)
def test_open_closed(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(None, dt(2017, 1, 1)))
assert_frame_equal(df, ret)
def test_closed_open_no_index(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), None))
assert_frame_equal(df, ret)
def test_open_open_no_index(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)]
}
)
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(None, None))
assert_frame_equal(df, ret)
def test_monthly_df(chunkstore_lib):
df = DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4),
dt(2016, 1, 5),
dt(2016, 2, 1),
dt(2016, 2, 2),
dt(2016, 2, 3),
dt(2016, 2, 4),
dt(2016, 3, 1)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='M')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 2)))
assert len(ret) == 2
assert_frame_equal(df, chunkstore_lib.read('chunkstore_test'))
def test_yearly_df(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 2, 1),
dt(2016, 3, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='A')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 3, 3)))
assert_frame_equal(df, ret)
def test_append_daily(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
df2 = DataFrame(data=[4, 5, 6],
index=Index(data=[dt(2016, 1, 4),
dt(2016, 1, 5),
dt(2016, 1, 6)],
name='date'),
columns=['data'])
chunkstore_lib.append('chunkstore_test', df2)
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 6)))
assert_frame_equal(ret, pd.concat([df, df2]))
def test_append_monthly(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 2, 1),
dt(2016, 3, 1)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='M')
df2 = DataFrame(data=[4, 5, 6],
index=Index(data=[dt(2016, 4, 1),
dt(2016, 5, 1),
dt(2016, 6, 1)],
name='date'),
columns=['data'])
chunkstore_lib.append('chunkstore_test', df2)
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 12, 31)))
assert_frame_equal(ret, pd.concat([df, df2]))
def test_append_yearly(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=Index(data=[dt(2010, 1, 1),
dt(2011, 1, 1),
dt(2012, 1, 1)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='A')
df2 = DataFrame(data=[4, 5, 6],
index=Index(data=[dt(2013, 1, 1),
dt(2014, 1, 1),
dt(2015, 1, 1)],
name='date'),
columns=['data'])
chunkstore_lib.append('chunkstore_test', df2)
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2000, 1, 1), dt(2100, 12, 31)))
assert_frame_equal(ret, pd.concat([df, df2]))
def test_append_existing_chunk(chunkstore_lib):
df = DataFrame(data=[1.7, 2.8, 3.1],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='M')
df2 = DataFrame(data=[4.0, 5.1, 6.9],
index=Index(data=[dt(2016, 1, 4),
dt(2016, 1, 5),
dt(2016, 1, 6)],
name='date'),
columns=['data'])
chunkstore_lib.append('chunkstore_test', df2)
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 31)))
assert_frame_equal(ret, pd.concat([df, df2]))
def test_store_objects_df(chunkstore_lib):
df = DataFrame(data=['1', '2', '3'],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
columns=['data'])
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 3)))
assert_frame_equal(df, ret)
def test_empty_range(chunkstore_lib):
df = DataFrame(data={'data': [1], 'values': [10]},
index=Index(data=[dt(2016, 1, 1)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
df = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 2)))
assert(df.empty)
def test_update(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
df2 = DataFrame(data={'data': [20, 30, 40]},
index=pd.Index(data=[dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)], name='date'))
equals = DataFrame(data={'data': [1, 20, 30, 40]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
chunkstore_lib.update('chunkstore_test', df2)
assert_frame_equal(chunkstore_lib.read('chunkstore_test'), equals)
assert(chunkstore_lib.get_info('chunkstore_test')['len'] == len(equals))
assert(chunkstore_lib.get_info('chunkstore_test')['chunk_count'] == len(equals))
def test_update_no_overlap(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
df2 = DataFrame(data={'data': [20, 30, 40]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4)], name='date'))
equals = DataFrame(data={'data': [20, 30, 40, 1, 2, 3]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4),
dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
chunkstore_lib.update('chunkstore_test', df2)
assert_frame_equal(chunkstore_lib.read('chunkstore_test'), equals)
def test_update_chunk_range(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2015, 1, 1),
dt(2015, 1, 2),
dt(2015, 1, 3)], name='date'))
df2 = DataFrame(data={'data': [30]},
index=pd.Index(data=[dt(2015, 1, 2)],
name='date'))
equals = DataFrame(data={'data': [30, 3]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3)],
name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='M')
chunkstore_lib.update('chunkstore_test', df2, chunk_range=DateRange(dt(2015, 1, 1), dt(2015, 1, 2)))
assert_frame_equal(chunkstore_lib.read('chunkstore_test'), equals)
def test_update_chunk_range_overlap(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2015, 1, 1),
dt(2015, 1, 2),
dt(2015, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='M')
chunkstore_lib.update('chunkstore_test', df, chunk_range=DateRange(dt(2015, 1, 1), dt(2015, 1, 3)))
assert_frame_equal(chunkstore_lib.read('chunkstore_test'), df)
def test_append_before(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
df2 = DataFrame(data={'data': [20, 30, 40]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4)], name='date'))
equals = DataFrame(data={'data': [20, 30, 40, 1, 2, 3]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4),
dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
chunkstore_lib.append('chunkstore_test', df2)
assert_frame_equal(chunkstore_lib.read('chunkstore_test') , equals)
def test_append_and_update(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
df2 = DataFrame(data={'data': [20, 30, 40]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4)], name='date'))
df3 = DataFrame(data={'data': [100, 300]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2016, 1, 2)], name='date'))
equals = DataFrame(data={'data': [100, 30, 40, 1, 300, 3]},
index=pd.Index(data=[dt(2015, 1, 2),
dt(2015, 1, 3),
dt(2015, 1, 4),
dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
chunkstore_lib.append('chunkstore_test', df2)
chunkstore_lib.update('chunkstore_test', df3)
assert_frame_equal(chunkstore_lib.read('chunkstore_test') , equals)
def test_update_same_df(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
sym = chunkstore_lib._get_symbol_info('chunkstore_test')
chunkstore_lib.update('chunkstore_test', df)
assert(sym == chunkstore_lib._get_symbol_info('chunkstore_test'))
def test_df_with_multiindex(chunkstore_lib):
df = DataFrame(data=[1, 2, 3],
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 2),
(dt(2016, 1, 2), 3),
(dt(2016, 1, 3), 2)],
names=['date', 'security']))
chunkstore_lib.write('pandas', df, chunk_size='D')
saved_df = chunkstore_lib.read('pandas')
assert np.all(df.values == saved_df.values)
def test_with_strings(chunkstore_lib):
df = DataFrame(data={'data': ['A', 'B', 'C']},
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
read_df = chunkstore_lib.read('chunkstore_test')
assert_frame_equal(read_df, df)
def test_with_strings_multiindex_append(chunkstore_lib):
df = DataFrame(data={'data': ['A', 'BBB', 'CC']},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 2),
(dt(2016, 1, 1), 3),
(dt(2016, 1, 2), 2)],
names=['date', 'security']))
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
read_df = chunkstore_lib.read('chunkstore_test')
assert_frame_equal(read_df, df)
df2 = DataFrame(data={'data': ['AAAAAAA']},
index=MultiIndex.from_tuples([(dt(2016, 1, 2), 4)],
names=['date', 'security']))
chunkstore_lib.append('chunkstore_test', df2)
df = DataFrame(data={'data': ['A', 'BBB', 'CC', 'AAAAAAA']},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 2),
(dt(2016, 1, 1), 3),
(dt(2016, 1, 2), 2),
(dt(2016, 1, 2), 4)],
names=['date', 'security']))
assert_frame_equal(chunkstore_lib.read('chunkstore_test') , df)
def gen_daily_data(month, days, securities):
for day in days:
openp = [round(random.uniform(50.0, 100.0), 1) for x in securities]
closep = [round(x + random.uniform(-5.0, 5.0), 1) for x in openp]
index_list = [(dt(2016, month, day), s) for s in securities]
yield DataFrame(data={'open': openp, 'close': closep},
index=MultiIndex.from_tuples(index_list,
names=['date', 'security']))
def write_random_data(chunkstore_lib, name, month, days, securities, chunk_size='D', update=False, append=False):
'''
will generate daily data and write it in daily chunks
regardless of what the chunk_size is set to.
month: integer
days: list of integers
securities: list of integers
chunk_size: one of 'D', 'M', 'A'
update: force update for each daily write
append: force append for each daily write
'''
df_list = []
for df in gen_daily_data(month, days, securities):
if update:
chunkstore_lib.update(name, df)
elif append or len(df_list) > 0:
chunkstore_lib.append(name, df)
else:
chunkstore_lib.write(name, df, chunk_size=chunk_size)
df_list.append(df)
return pd.concat(df_list)
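# Example call (illustrative only; `lib` stands for a chunkstore library
# fixture): write one month of daily data for ten securities as monthly
# chunks and keep the concatenated frame for later comparisons:
#   expected = write_random_data(lib, 'demo', 1, list(range(1, 31)),
#                                list(range(1, 11)), chunk_size='M')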
def test_multiple_actions(chunkstore_lib):
def helper(chunkstore_lib, name, chunk_size):
written_df = write_random_data(chunkstore_lib, name, 1, list(range(1, 31)), list(range(1, 101)), chunk_size=chunk_size)
read_info = chunkstore_lib.read(name)
assert_frame_equal(written_df, read_info)
df = write_random_data(chunkstore_lib, name, 1, list(range(1, 31)), list(range(1, 101)), chunk_size=chunk_size)
read_info = chunkstore_lib.read(name)
assert_frame_equal(df, read_info)
r = read_info
df = write_random_data(chunkstore_lib, name, 2, list(range(1, 29)), list(range(1, 501)), append=True, chunk_size=chunk_size)
read_info = chunkstore_lib.read(name)
assert_frame_equal(pd.concat([r, df]), read_info)
for chunk_size in ['D', 'M', 'A']:
helper(chunkstore_lib, 'test_data_' + chunk_size, chunk_size)
def test_multiple_actions_monthly_data(chunkstore_lib):
def helper(chunkstore_lib, chunk_size, name, df, append):
chunkstore_lib.write(name, df, chunk_size=chunk_size)
r = chunkstore_lib.read(name)
assert_frame_equal(r, df)
chunkstore_lib.append(name, append)
assert_frame_equal(chunkstore_lib.read(name), pd.concat([df, append]))
chunkstore_lib.update(name, append)
if chunk_size != 'A':
assert_frame_equal(chunkstore_lib.read(name), pd.concat([df, append]))
else:
# chunksize is the entire DF, so we'll overwrite the whole thing
# with the update when it's yearly chunking
assert_frame_equal(chunkstore_lib.read(name), append)
df = []
for month in range(1, 4):
df.extend(list(gen_daily_data(month, list(range(1, 21)), list(range(1, 11)))))
df = pd.concat(df)
append = []
for month in range(6, 10):
append.extend(list(gen_daily_data(month, list(range(1, 21)), list(range(1, 11)))))
append = pd.concat(append)
for chunk_size in ['D', 'M', 'A']:
helper(chunkstore_lib, chunk_size, 'test_monthly_' + chunk_size, df, append)
def test_delete(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
assert ('test_df' in chunkstore_lib.list_symbols())
chunkstore_lib.delete('test_df')
assert (chunkstore_lib.list_symbols() == [])
def test_get_info(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
info = {'len': 3,
'appended_rows': 0,
'chunk_count': 3,
'metadata': {'columns': [u'date', u'id', u'data']},
'chunker': u'date',
'chunk_size': 'D',
'serializer': u'FrameToArray'
}
assert(chunkstore_lib.get_info('test_df') == info)
def test_get_info_after_append(chunkstore_lib):
df = DataFrame(data={'data': [1.1, 2.1, 3.1]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
df2 = DataFrame(data={'data': [1.1, 1.1, 1.1]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 2),
(dt(2016, 1, 2), 2),
(dt(2016, 1, 4), 1)],
names=['date', 'id'])
)
chunkstore_lib.append('test_df', df2)
assert_frame_equal(chunkstore_lib.read('test_df'), pd.concat([df, df2]).sort_index())
info = {'len': 6,
'appended_rows': 2,
'chunk_count': 4,
'metadata': {'columns': [u'date', u'id', u'data']},
'chunker': u'date',
'chunk_size': u'D',
'serializer': u'FrameToArray'
}
assert(chunkstore_lib.get_info('test_df') == info)
def test_get_info_after_update(chunkstore_lib):
df = DataFrame(data={'data': [1.1, 2.1, 3.1]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df)
df2 = DataFrame(data={'data': [1.1, 1.1, 1.1]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 2),
(dt(2016, 1, 2), 2),
(dt(2016, 1, 4), 1)],
names=['date', 'id'])
)
chunkstore_lib.update('test_df', df2)
info = {'len': 4,
'appended_rows': 0,
'chunk_count': 4,
'metadata': {'columns': [u'date', u'id', u'data']},
'chunker': u'date',
'chunk_size': u'D',
'serializer': u'FrameToArray'
}
assert(chunkstore_lib.get_info('test_df') == info)
def test_delete_range(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 2, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 3, 1), 1),
(dt(2016, 3, 2), 1)],
names=['date', 'id'])
)
df_result = DataFrame(data={'data': [1, 6]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 3, 2), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test', df, chunk_size='M')
chunkstore_lib.delete('test', chunk_range=DateRange(dt(2016, 1, 2), dt(2016, 3, 1)))
assert_frame_equal(chunkstore_lib.read('test'), df_result)
assert(chunkstore_lib.get_info('test')['len'] == len(df_result))
assert(chunkstore_lib.get_info('test')['chunk_count'] == 2)
def test_delete_range_noindex(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6],
'date': [dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 2, 1),
dt(2016, 2, 2),
dt(2016, 3, 1),
dt(2016, 3, 2)]})
df_result = DataFrame(data={'data': [1, 6],
'date': [dt(2016, 1, 1),
dt(2016, 3, 2)]})
chunkstore_lib.write('test', df, chunk_size='M')
chunkstore_lib.delete('test', chunk_range=DateRange(dt(2016, 1, 2), dt(2016, 3, 1)))
assert_frame_equal(chunkstore_lib.read('test'), df_result)
def test_read_chunk_range(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6, 7, 8, 9]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1),
(dt(2016, 2, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 2, 3), 1),
(dt(2016, 3, 1), 1),
(dt(2016, 3, 2), 1),
(dt(2016, 3, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test', df, chunk_size='M')
assert(chunkstore_lib.read('test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 1))).index.get_level_values('date')[0] == dt(2016,1,1))
assert(chunkstore_lib.read('test', chunk_range=DateRange(dt(2016, 1, 2), dt(2016, 1, 2))).index.get_level_values('date')[0] == dt(2016, 1, 2))
assert(chunkstore_lib.read('test', chunk_range=DateRange(dt(2016, 1, 3), dt(2016, 1, 3))).index.get_level_values('date')[0] == dt(2016, 1, 3))
assert(chunkstore_lib.read('test', chunk_range=DateRange(dt(2016, 2, 2), dt(2016, 2, 2))).index.get_level_values('date')[0] == dt(2016, 2, 2))
assert(len(chunkstore_lib.read('test', chunk_range=DateRange(dt(2016, 2, 2), dt(2016, 2, 2)), filter_data=False)) == 3)
df2 = chunkstore_lib.read('test', chunk_range=DateRange(None, None))
assert_frame_equal(df, df2)
def test_read_data_doesnt_exist(chunkstore_lib):
with pytest.raises(NoDataFoundException) as e:
chunkstore_lib.read('some_data')
assert('No data found' in str(e))
def test_invalid_type(chunkstore_lib):
with pytest.raises(Exception) as e:
chunkstore_lib.write('some_data', str("Cannot write a string"), 'D')
assert('Can only chunk DataFrames' in str(e))
def test_append_no_data(chunkstore_lib):
with pytest.raises(NoDataFoundException) as e:
chunkstore_lib.append('some_data', DataFrame())
assert('Symbol does not exist.' in str(e))
def test_append_no_new_data(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6, 7, 8, 9]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1),
(dt(2016, 2, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 2, 3), 1),
(dt(2016, 3, 1), 1),
(dt(2016, 3, 2), 1),
(dt(2016, 3, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test', df)
chunkstore_lib.append('test', df)
r = chunkstore_lib.read('test')
assert_frame_equal(pd.concat([df, df]).sort_index(), r)
def test_overwrite_series(chunkstore_lib):
s = pd.Series([1], index=pd.date_range('2016-01-01',
'2016-01-01',
name='date'),
name='vals')
chunkstore_lib.write('test', s)
chunkstore_lib.write('test', s + 1)
assert_series_equal(chunkstore_lib.read('test'), s + 1)
def test_overwrite_series_monthly(chunkstore_lib):
s = pd.Series([1, 2], index=pd.Index(data=[dt(2016, 1, 1), dt(2016, 2, 1)], name='date'), name='vals')
chunkstore_lib.write('test', s, chunk_size='M')
chunkstore_lib.write('test', s + 1, chunk_size='M')
assert_series_equal(chunkstore_lib.read('test'), s + 1)
def test_pandas_datetime_index_store_series(chunkstore_lib):
df = Series(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
name='data')
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
s = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 3)))
assert_series_equal(s, df)
def test_yearly_series(chunkstore_lib):
df = Series(data=[1, 2, 3],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 2, 1),
dt(2016, 3, 3)],
name='date'),
name='data')
chunkstore_lib.write('chunkstore_test', df, chunk_size='A')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 3, 3)))
assert_series_equal(df, ret)
def test_store_objects_series(chunkstore_lib):
df = Series(data=['1', '2', '3'],
index=Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)],
name='date'),
name='data')
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
ret = chunkstore_lib.read('chunkstore_test', chunk_range=DateRange(dt(2016, 1, 1), dt(2016, 1, 3)))
assert_series_equal(df, ret)
def test_update_series(chunkstore_lib):
df = Series(data=[1, 2, 3],
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'),
name='data')
df2 = Series(data=[20, 30, 40],
index=pd.Index(data=[dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)], name='date'),
name='data')
equals = Series(data=[1, 20, 30, 40],
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3),
dt(2016, 1, 4)], name='date'),
name='data')
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
chunkstore_lib.update('chunkstore_test', df2)
assert_series_equal(chunkstore_lib.read('chunkstore_test'), equals)
def test_update_same_series(chunkstore_lib):
df = Series(data=[1, 2, 3],
index=pd.Index(data=[dt(2016, 1, 1),
dt(2016, 1, 2),
dt(2016, 1, 3)], name='date'),
name='data')
chunkstore_lib.write('chunkstore_test', df, chunk_size='D')
sym = chunkstore_lib._get_symbol_info('chunkstore_test')
chunkstore_lib.update('chunkstore_test', df)
assert(sym == chunkstore_lib._get_symbol_info('chunkstore_test'))
def test_dtype_mismatch(chunkstore_lib):
s = pd.Series([1], index=pd.date_range('2016-01-01', '2016-01-01', name='date'), name='vals')
# Write with an int
chunkstore_lib.write('test', s, chunk_size='D')
assert(str(chunkstore_lib.read('test').dtype) == 'int64')
# Update with a float
chunkstore_lib.update('test', s * 1.0)
assert(str(chunkstore_lib.read('test').dtype) == 'float64')
chunkstore_lib.write('test', s * 1.0, chunk_size='D')
assert(str(chunkstore_lib.read('test').dtype) == 'float64')
def test_read_column_subset(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'open': [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9],
'close': [1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9, 9.0],
'prev_close': [.1, .2, .3, .4, .5, .6, .7, .8, .8],
'volume': [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]
},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1),
(dt(2016, 2, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 2, 3), 1),
(dt(2016, 3, 1), 1),
(dt(2016, 3, 2), 1),
(dt(2016, 3, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test', df, chunk_size='D')
r = chunkstore_lib.read('test', columns=['prev_close', 'volume'])
assert_frame_equal(r, df[['prev_close', 'volume']])
def test_rename(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'open': [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9],
'close': [1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9, 9.0],
'prev_close': [.1, .2, .3, .4, .5, .6, .7, .8, .8],
'volume': [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]
},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1),
(dt(2016, 2, 1), 1),
(dt(2016, 2, 2), 1),
(dt(2016, 2, 3), 1),
(dt(2016, 3, 1), 1),
(dt(2016, 3, 2), 1),
(dt(2016, 3, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test', df, chunk_size='D')
assert_frame_equal(chunkstore_lib.read('test'), df)
chunkstore_lib.rename('test', 'new_name')
assert_frame_equal(chunkstore_lib.read('new_name'), df)
with pytest.raises(Exception) as e:
chunkstore_lib.rename('new_name', 'new_name')
assert('already exists' in str(e))
with pytest.raises(NoDataFoundException) as e:
chunkstore_lib.rename('doesnt_exist', 'temp')
assert('No data found for doesnt_exist' in str(e))
assert('test' not in chunkstore_lib.list_symbols())
# read out all chunks that have symbol set to 'test'; the list should be empty
chunks = []
for x in chunkstore_lib._collection.find({SYMBOL: 'test'}, sort=[(START, pymongo.ASCENDING)],):
chunks.append(x)
assert(len(chunks) == 0)
def test_pass_thru_chunker(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]})
chunkstore_lib.write('test_df', df, chunker=PassthroughChunker())
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
def test_pass_thru_chunker_append(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]})
df2 = DataFrame(data={'data': [4, 5, 6]})
chunkstore_lib.write('test_df', df, chunker=PassthroughChunker())
chunkstore_lib.append('test_df', df2)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(pd.concat([df, df2], ignore_index=True), read_df)
def test_pass_thru_chunker_update(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]})
df2 = DataFrame(data={'data': [5, 6, 7]})
chunkstore_lib.write('test_df', df, chunker=PassthroughChunker())
chunkstore_lib.update('test_df', df2)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df2, read_df)
def test_pass_thru_chunker_update_range(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]})
df2 = DataFrame(data={'data': [5, 6, 7]})
chunkstore_lib.write('test_df', df, chunker=PassthroughChunker())
chunkstore_lib.update('test_df', df2, chunk_range="")
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(read_df, df2)
def test_size_chunking(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2016, 1, 1)] * 5500000})
chunkstore_lib.write('test_df', df)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(df, read_df)
def test_size_chunk_append(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2016, 1, 1)] * 5500000})
dg = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2016, 1, 1)] * 5500000})
chunkstore_lib.write('test_df', df)
chunkstore_lib.append('test_df', dg)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(pd.concat([df, dg], ignore_index=True), read_df)
def test_delete_range_segment(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=7000000),
'date': [dt(2016, 1, 1)] * 7000000})
dg = DataFrame(data={'data': np.random.randint(0, 100, size=100),
'date': [dt(2016, 1, 2)] * 100})
chunkstore_lib.write('test_df', pd.concat([df, dg], ignore_index=True), chunk_size='M')
chunkstore_lib.delete('test_df')
assert('test_df' not in chunkstore_lib.list_symbols())
chunkstore_lib.write('test_df', pd.concat([df, dg], ignore_index=True), chunk_size='M')
chunkstore_lib.delete('test_df', chunk_range=pd.date_range(dt(2016, 1, 1), dt(2016, 1, 1)))
read_df = chunkstore_lib.read('test_df')
assert(read_df.equals(dg))
assert(chunkstore_lib._collection.count({'sy': 'test_df'}) == 1)
def test_size_chunk_update(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2016, 1, 1)] * 5500000})
dg = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2016, 1, 1)] * 5500000})
dh = DataFrame(data={'data': np.random.randint(0, 100, size=100),
'date': [dt(2016, 1, 1)] * 100})
chunkstore_lib.write('test_df', df)
chunkstore_lib.append('test_df', dg)
chunkstore_lib.update('test_df', dh)
read_df = chunkstore_lib.read('test_df')
assert_frame_equal(dh, read_df)
assert(chunkstore_lib._collection.count({'sy': 'test_df'}) == 1)
def test_size_chunk_multiple_update(chunkstore_lib):
df_large = DataFrame(data={'data': np.random.randint(0, 100, size=5500000),
'date': [dt(2015, 1, 1)] * 5500000})
df_small = DataFrame(data={'data': np.random.randint(0, 100, size=100),
'date': [dt(2016, 1, 1)] * 100})
chunkstore_lib.update('test_df', df_large, upsert=True)
chunkstore_lib.update('test_df', df_small, upsert=True)
read_df = chunkstore_lib.read('test_df')
expected = pd.concat([df_large, df_small]).reset_index(drop=True)
assert_frame_equal(expected, read_df)
assert(chunkstore_lib._collection.count({'sy': 'test_df'}) == 3)
def test_get_chunk_range(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df, chunk_size='D')
x = list(chunkstore_lib.get_chunk_ranges('test_df'))
assert(len(x) == 3)
assert((b'2016-01-01', b'2016-01-01') in x)
assert((b'2016-01-02', b'2016-01-02') in x)
assert((b'2016-01-03', b'2016-01-03') in x)
def test_iterators(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
chunkstore_lib.write('test_df', df, chunk_size='D')
for x, d in enumerate(chunkstore_lib.iterator('test_df')):
assert(len(d) == 1)
assert(d.data[0] == x + 1)
for x, d in enumerate(chunkstore_lib.reverse_iterator('test_df')):
assert(len(d) == 1)
assert(d.data[0] == len(df) - x)
dr = DateRange(dt(2016, 1, 2), dt(2016, 1, 2))
assert(len(list(chunkstore_lib.iterator('test_df', chunk_range=dr))) == 1)
assert(len(list(chunkstore_lib.reverse_iterator('test_df', chunk_range=dr))) == 1)
def test_unnamed_columns(chunkstore_lib):
df = DataFrame(data={'data': [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', None])
)
with pytest.raises(Exception) as e:
chunkstore_lib.write('test_df', df, chunk_size='D')
assert('must be named' in str(e))
df = DataFrame(data={None: [1, 2, 3]},
index=MultiIndex.from_tuples([(dt(2016, 1, 1), 1),
(dt(2016, 1, 2), 1),
(dt(2016, 1, 3), 1)],
names=['date', 'id'])
)
with pytest.raises(Exception) as e:
chunkstore_lib.write('test_df', df, chunk_size='D')
assert('must be named' in str(e))
def test_quarterly_data(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=366)},
index=pd.date_range('2016-01-01', '2016-12-31'))
df.index.name = 'date'
chunkstore_lib.write('quarterly', df, chunk_size='Q')
assert_frame_equal(df, chunkstore_lib.read('quarterly'))
assert(len(chunkstore_lib.read('quarterly', chunk_range=(None, '2016-01-05'))) == 5)
count = 0
for _ in chunkstore_lib._collection.find({SYMBOL: 'quarterly'}, sort=[(START, pymongo.ASCENDING)],):
count += 1
assert(count == 4)
def test_list_symbols(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=366)},
index=pd.date_range('2016-01-01', '2016-12-31'))
df.index.name = 'date'
chunkstore_lib.write('rabbit', df)
chunkstore_lib.write('dragon', df)
chunkstore_lib.write('snake', df)
chunkstore_lib.write('wolf', df)
chunkstore_lib.write('bear', df)
assert('dragon' in chunkstore_lib.list_symbols())
assert(set(['rabbit', 'dragon', 'bear']) == set(chunkstore_lib.list_symbols(partial_match='r')))
assert(chunkstore_lib.has_symbol('dragon'))
assert(chunkstore_lib.has_symbol('marmot') is False)
def test_stats(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=366)},
index=pd.date_range('2016-01-01', '2016-12-31'))
df.index.name = 'date'
chunkstore_lib.write('rabbit', df)
chunkstore_lib.write('dragon', df)
chunkstore_lib.write('snake', df)
chunkstore_lib.write('wolf', df)
chunkstore_lib.write('bear', df)
s = chunkstore_lib.stats()
assert(s['symbols']['count'] == 5)
assert(s['chunks']['count'] == 366 * 5)
assert(s['chunks']['count'] == s['metadata']['count'])
def test_metadata(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.write('data', df, metadata = 'some metadata')
m = chunkstore_lib.read_metadata('data')
assert(m == u'some metadata')
def test_metadata_update(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.write('data', df, metadata = 'some metadata', chunk_size='M')
df = DataFrame(data={'data': np.random.randint(0, 100, size=1)},
index=pd.date_range('2016-01-02', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.update('data', df, metadata='different metadata')
m = chunkstore_lib.read_metadata('data')
assert(m == u'different metadata')
def test_metadata_nosymbol(chunkstore_lib):
with pytest.raises(NoDataFoundException):
chunkstore_lib.read_metadata('None')
def test_metadata_none(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.write('data', df, chunk_size='M')
assert(chunkstore_lib.read_metadata('data') is None)
def test_metadata_invalid(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
with pytest.raises(Exception) as e:
chunkstore_lib.write('data', df, chunk_size='M', metadata=df)
def test_write_metadata(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.write('data', df)
chunkstore_lib.write_metadata('data', 'meta')
m = chunkstore_lib.read_metadata('data')
assert(m == u'meta')
def test_write_metadata_nosymbol(chunkstore_lib):
with pytest.raises(NoDataFoundException):
chunkstore_lib.write_metadata('doesnt_exist', 'meta')
def test_audit(chunkstore_lib):
df = DataFrame(data={'data': np.random.randint(0, 100, size=2)},
index=pd.date_range('2016-01-01', '2016-01-02'))
df.index.name = 'date'
chunkstore_lib.write('data', df, audit={'user': 'test_user'})
df = DataFrame(data={'data': np.random.randint(0, 100, size=10)},
index=pd.date_range('2016-01-01', '2016-01-10'))
df.index.name = 'date'
chunkstore_lib.write('data', df, audit={'user': 'other_user'})
assert(len(chunkstore_lib.read_audit_log()) == 2)
assert(len(chunkstore_lib.read_audit_log(symbol='data')) == 2)
assert(len(chunkstore_lib.read_audit_log(symbol='none')) == 0)
chunkstore_lib.append('data', df, audit={'user': 'test_user'})
assert(chunkstore_lib.read_audit_log()[-1]['appended_rows'] == 10)
df = DataFrame(data={'data': np.random.randint(0, 100, size=5)},
index=pd.date_range('2017-01-01', '2017-01-5'))
df.index.name = 'date'
chunkstore_lib.update('data', df, audit={'user': 'other_user'})
assert(chunkstore_lib.read_audit_log()[-1]['new_chunks'] == 5)
chunkstore_lib.rename('data', 'data_new', audit={'user': 'temp_user'})
assert(chunkstore_lib.read_audit_log()[-1]['action'] == 'symbol rename')
chunkstore_lib.delete('data_new', chunk_range=DateRange('2016-01-01', '2016-01-02'), audit={'user': 'test_user'})
chunkstore_lib.delete('data_new', audit={'user': 'test_user'})
assert(chunkstore_lib.read_audit_log()[-1]['action'] == 'symbol delete')
assert(chunkstore_lib.read_audit_log()[-2]['action'] == 'range delete')
def test_chunkstore_misc(chunkstore_lib):
p = pickle.dumps(chunkstore_lib)
c = pickle.loads(p)
assert(chunkstore_lib._arctic_lib.get_name() == c._arctic_lib.get_name())
assert("arctic_test.TEST" in str(chunkstore_lib))
assert(str(chunkstore_lib) == repr(chunkstore_lib))
def test_unsorted_index(chunkstore_lib):
df = pd.DataFrame({'date': [dt(2016,9,1), dt(2016,8,31)],
'vals': range(2)}).set_index('date')
df2 = pd.DataFrame({'date': [dt(2016,9,2), dt(2016,9,1)],
'vals': range(2)}).set_index('date')
chunkstore_lib.write('test_symbol', df)
assert_frame_equal(df.sort_index(), chunkstore_lib.read('test_symbol'))
chunkstore_lib.update('test_symbol', df2)
assert_frame_equal(chunkstore_lib.read('test_symbol'),
pd.DataFrame({'date': pd.date_range('2016-8-31',
'2016-9-2'),
'vals': [1,1,0]}).set_index('date'))
def test_unsorted_date_col(chunkstore_lib):
df = pd.DataFrame({'date': [dt(2016,9,1), dt(2016,8,31)],
'vals': range(2)})
df2 = pd.DataFrame({'date': [dt(2016,9,2), dt(2016,9,1)],
'vals': range(2)})
chunkstore_lib.write('test_symbol', df)
try:
df = df.sort_values('date')
except AttributeError:
df = df.sort(columns='date')
assert_frame_equal(df.reset_index(drop=True), chunkstore_lib.read('test_symbol'))
chunkstore_lib.update('test_symbol', df2)
assert_frame_equal(chunkstore_lib.read('test_symbol'),
pd.DataFrame({'date': pd.date_range('2016-8-31',
'2016-9-2'),
'vals': [1,1,0]}))
def test_chunk_range_with_dti(chunkstore_lib):
df = pd.DataFrame({'date': [dt(2016,9,1), dt(2016,8,31)],
'vals': range(2)})
chunkstore_lib.write('data', df)
assert(len(list(chunkstore_lib.get_chunk_ranges('data', chunk_range=pd.date_range(dt(2016,1,1), dt(2016, 12, 31))))) == 2)
| lgpl-2.1 |
alistairlow/tensorflow | tensorflow/contrib/distributions/python/ops/mixture_same_family.py | 12 | 14331 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
import matplotlib.pyplot as plt
ds = tf.contrib.distributions
### Create a mixture of two scalar Gaussians:
gm = ds.MixtureSameFamily(
mixture_distribution=ds.Categorical(
probs=[0.3, 0.7]),
components_distribution=ds.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
plt.plot(x, gm.prob(x).eval());
### Create a mixture of two Bivariate Gaussians:
gm = ds.MixtureSameFamily(
mixture_distribution=ds.Categorical(
probs=[0.3, 0.7]),
components_distribution=ds.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
def meshgrid(x, y=None):
  y = x if y is None else y
[gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
"""
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tf.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tf.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `if not mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
ValueError: if `mixture_distribution` categories does not equal
`components_distribution` rightmost batch shape.
"""
parameters = locals()
with ops.name_scope(name):
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
self._event_ndims = (s.shape[0].value
if s.shape.with_rank_at_least(1)[0].value is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
control_flow_ops.assert_has_rank(
mixture_distribution.event_shape_tensor(), 0,
message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = mixture_distribution.logits.shape.with_rank_at_least(1)[-1].value
kc = components_distribution.batch_shape.with_rank_at_least(1)[-1].value
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = self._pad_mix_dims(mask) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
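# Sampling strategy above: draw one sample from every component
# ([n, B, k, E]), draw component indices from the mixture distribution
# ([n, B]), then collapse the component axis by multiplying with a one-hot
# mask and summing, which keeps exactly one component sample per draw.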
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, dim=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
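# With mixture weights pi_k = softmax(logits)_k and component densities
# p_k(x), the method above computes
#   log p(x) = logsumexp_k( log p_k(x) + log pi_k )
# using reduce_logsumexp for numerical stability.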
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = self._pad_mix_dims(
self.mixture_distribution.probs) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, dim=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = self._pad_mix_dims(
self.mixture_distribution.probs) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
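# Expanded, with Z the mixture indicator:
#   Var(Y) = sum_k pi_k * Var(Y | Z=k) + sum_k pi_k * (mean_k - mean)^2,
# i.e. mean_cond_var + var_cond_mean as computed above.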
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = self._pad_mix_dims(self._pad_mix_dims(
self.mixture_distribution.probs)) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
def _pad_mix_dims(self, x):
with ops.name_scope("pad_mix_dims", values=[x]):
def _get_ndims(d):
if d.batch_shape.ndims is not None:
return d.batch_shape.ndims
return array_ops.shape(d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(self)
cat_batch_ndims = _get_ndims(self.mixture_distribution)
bnd = distribution_util.pick_vector(
self.mixture_distribution.is_scalar_batch(),
[dist_batch_ndims], [cat_batch_ndims])[0]
s = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([
s[:-1],
array_ops.ones([bnd], dtype=dtypes.int32),
s[-1:],
array_ops.ones([self._event_ndims], dtype=dtypes.int32),
], axis=0))
return x
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
| apache-2.0 |
stccenter/datadiscovery | ranking/sorting.py | 1 | 2328 | """
Created on Tue Jun 6 13:22:42 2017
@author: larakamal
enhanced sorting
this code uses insertion sort to rank the documents
the input of the code is a saved machine learning model
from ranking_save_model.py and a data file from data/labelled_queries
the output of the code is a ranked data file;
change the directory of the output file accordingly in line 67
"""
import pandas as pd
import csv
#sorting function
def insertion_sort(features, model):
#initialize rank from 0 to length of features
rank = []
for i in range(len(features)):
rank.append(i)
#insertion sort to sort features and rank list
#insertion sort assumes transitivity
for i in range(1, len(features)):
j = i-1
temp = features[i]
temp2 = rank[i]
while j >= 0 and model.predict((features[j] - temp).reshape(1, -1)) == 0:
features[j+1] = features[j]
rank[j+1] = rank[j]
j = j - 1
features[j+1] = temp
rank[j+1] = temp2
return rank
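# Sketch of the pairwise interface insertion_sort() relies on (names below
# are illustrative, not from this project): model.predict(diff) should
# return 0 when the row at position j belongs after the candidate row,
# where diff is the difference of the two feature vectors.
#
#   import numpy as np
#   class SumPreference(object):
#       def predict(self, diff):
#           return np.array([0 if diff.sum() < 0 else 1])
#
#   feats = np.array([[0.2, 0.1], [0.9, 0.8], [0.5, 0.5]])
#   order = insertion_sort(feats.copy(), SumPreference())
#   # `order` lists the original row indices in ranked order.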
def get_search_results(model, scaler, algorithm):
#extract 10 features from documents
queries = ["gravity", "ocean pressure", "ocean temperature", "ocean wind", "pathfinder", "quikscat",
"radar", "saline density", "sea ice"]
for q in queries:
path = "data/labelled_queries/" + q + ".csv";
dataframe = pd.read_csv(path)
output_path = "data/results/test/" + algorithm + "/" + q + "_sorted.csv";
features = dataframe.ix[:,0:10]
features = scaler.transform(features)
rank = insertion_sort(features, model)
#re-arrange documents accordingly
rows = dataframe.ix[:,0:11]
sorted_rows =[]
for i in rank:
sorted_rows.append(rows.values[i])
#save file
with open(output_path, 'w', encoding = 'utf-8-sig') as outcsv:
writer = csv.writer(outcsv)
writer.writerow(['term_score', 'releaseDate_score', 'versionNum_score', 'processingL_score', 'allPop_score','monthPop_score', 'userPop_score', 'spatialR_score','temporalR_score','click_score','label'])
for i in sorted_rows:
writer.writerow(i)
| apache-2.0 |
fw1121/bcbio-nextgen | bcbio/utils.py | 2 | 18266 | """Helpful utilities for building analysis pipelines.
"""
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
import ConfigParser
import collections
import fnmatch
import subprocess
import toolz as tz
import yaml
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown()
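# Example usage (illustrative; `process_one` and `items` are placeholders):
#
#   with cpmap(cores=4) as pmap:
#       results = list(pmap(process_one, items))
#
# With cores=1 the same call site runs serially via itertools.imap.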
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def transform_to(ext):
"""
Decorator to create an output filename from an output filename with
the specified extension. Changes the extension, in_file is transformed
to a new type.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@transform(".bam")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.bam")
@transform(".bam")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = replace_suffix(os.path.basename(in_path), ext)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def filter_to(word):
"""
Decorator to create an output filename from an input filename by
adding a word onto the stem. in_file is filtered by the function
and the results are written to out_file. You would want to use
this over transform_to if you don't know the extension of the file
going in. This also memoizes the output file.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@filter_to(".foo")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")
@filter_to(".foo")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.foo.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = append_stem(os.path.basename(in_path), word)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def memoize_outfile(ext=None, stem=None):
"""
Memoization decorator.
See docstring for transform_to and filter_to for details.
"""
if ext:
return transform_to(ext)
if stem:
return filter_to(stem)
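# Illustrative sketch of the decorators above: _sam_to_bam is a hypothetical
# converter. A call like _sam_to_bam("sample.sam") derives "sample.bam",
# runs the body only if that file is missing, and returns the path.
@transform_to(".bam")
def _sam_to_bam(in_file, out_dir=None, out_file=None):
    with open(out_file, "w") as out_handle:
        out_handle.write("converted from %s\n" % in_file)
    return out_file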
def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname)
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
os.path.getmtime(fname) >= os.path.getmtime(cmp_fname))
except OSError:
return False
def create_dirs(config, names=None):
if names is None:
names = config["dir"].keys()
for dname in names:
d = config["dir"][dname]
safe_makedir(d)
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
with open(fname, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = ConfigParser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config
def add_full_path(dirname, basedir=None):
if basedir is None:
basedir = os.getcwd()
if not dirname.startswith("/"):
dirname = os.path.join(basedir, dirname)
return dirname
def splitext_plus(f):
"""Split on file extensions, allowing for zipped extensions.
"""
base, ext = os.path.splitext(f)
if ext in [".gz", ".bz2", ".zip"]:
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return base, ext
def remove_safe(f):
try:
os.remove(f)
except OSError:
pass
def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname]
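# Quick sketch of the two path helpers above on zipped and indexed files
# (_demo_path_helpers is illustrative only).
def _demo_path_helpers():
    assert splitext_plus("sample.vcf.gz") == ("sample", ".vcf.gz")
    # indexed formats come back paired with their companion index file
    assert file_plus_index("sample.bam") == ["sample.bam", "sample.bam.bai"]
    assert file_plus_index("notes.txt") == ["notes.txt"]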
def symlink_plus(orig, new):
"""Create relative symlinks and handle associated biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
with chdir(os.path.dirname(new)):
remove_safe(new + ext)
                # Work around filesystems that randomly fail to
                # symlink by falling back to a copy below.
try:
os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
except OSError:
if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
remove_safe(new + ext)
shutil.copyfile(orig + ext, new + ext)
orig_noext = splitext_plus(orig)[0]
new_noext = splitext_plus(new)[0]
for sub_ext in [".bai"]:
if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
with chdir(os.path.dirname(new_noext)):
os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
def open_gzipsafe(f):
return gzip.open(f) if f.endswith(".gz") else open(f)
def append_stem(to_transform, word):
"""
renames a filename or list of filenames with 'word' appended to the stem
of each one:
example: append_stem("/path/to/test.sam", "_filtered") ->
"/path/to/test_filtered.sam"
"""
if is_sequence(to_transform):
return [append_stem(f, word) for f in to_transform]
elif is_string(to_transform):
(base, ext) = splitext_plus(to_transform)
return "".join([base, word, ext])
else:
raise ValueError("append_stem takes a single filename as a string or "
"a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
"""
replaces the suffix on a filename or list of filenames
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
if is_sequence(to_transform):
transformed = []
for f in to_transform:
(base, _) = os.path.splitext(f)
transformed.append(base + suffix)
return transformed
elif is_string(to_transform):
(base, _) = os.path.splitext(to_transform)
return base + suffix
else:
raise ValueError("replace_suffix takes a single filename as a string or "
"a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk
def partition(pred, iterable):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
return itertools.ifilterfalse(pred, t1), itertools.ifilter(pred, t2)
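# Sketch of the two partition helpers above (illustrative only; the lazy
# iterators are materialized as lists for clarity).
def _demo_partitions():
    chunks = list(partition_all(2, [1, 2, 3, 4, 5]))
    # -> [[1, 2], [3, 4], [5]]
    evens, odds = [list(xs) for xs in partition(lambda x: x % 2, range(6))]
    # evens -> [0, 2, 4]; odds -> [1, 3, 5]
    return chunks, evens, odds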
# ## Dealing with configuration files
def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.iteritems():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out
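# Sketch of merge_config_files: later files override scalar values while
# nested dictionaries are merged key by key (_demo_merge_config_files and
# its sample settings are illustrative only).
def _demo_merge_config_files():
    base = {"algorithm": {"aligner": "bwa", "quality_format": "standard"}}
    override = {"algorithm": {"aligner": "bowtie2"}, "resources": {"cores": 8}}
    fnames = []
    for config in [base, override]:
        with tempfile.NamedTemporaryFile(suffix=".yaml", delete=False) as out_handle:
            out_handle.write(yaml.dump(config))
            fnames.append(out_handle.name)
    merged = merge_config_files(fnames)
    # merged["algorithm"] == {"aligner": "bowtie2", "quality_format": "standard"}
    return merged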
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.iteritems():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
def get_in(d, t, default=None):
"""
    look up a value in a nested dictionary using a tuple of keys,
    each item in the tuple one layer deeper
    example: get_in({1: {2: 3}}, (1, 2)) -> 3
    example: get_in({1: {2: 3}}, (2, 3)) -> None (the default)
"""
return tz.get_in(t, d, default)
def flatten(l):
"""
flatten an irregular list of lists
example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
lifted from: http://stackoverflow.com/questions/2158395/
"""
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el,
basestring):
for sub in flatten(el):
yield sub
else:
yield el
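# Sketch of the nested-data helpers above on a small sample structure
# (_demo_nested_helpers is illustrative only).
def _demo_nested_helpers():
    data = {"config": {"algorithm": {"aligner": "bwa"}}}
    aligner = get_in(data, ("config", "algorithm", "aligner"))
    # -> "bwa"
    missing = get_in(data, ("config", "resources"), default="none")
    # -> "none"
    flat = list(flatten([[1, [2, 3]], "abc", 4]))
    # -> [1, 2, 3, "abc", 4]
    return aligner, missing, flat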
def is_sequence(arg):
"""
check if 'arg' is a sequence
    example: is_sequence([]) -> True
    example: is_sequence("lol") -> False
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
def is_pair(arg):
"""
check if 'arg' is a two-item sequence
"""
return is_sequence(arg) and len(arg) == 2
def is_string(arg):
return isinstance(arg, basestring)
def locate(pattern, root=os.curdir):
'''Locate all files matching supplied filename pattern in and below
supplied root directory.'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
"""
change the output directory to dest_dir
can take a string (single file) or a list of files
"""
if is_sequence(out_files):
filenames = map(os.path.basename, out_files)
return [os.path.join(dest_dir, x) for x in filenames]
elif is_string(out_files):
return os.path.join(dest_dir, os.path.basename(out_files))
else:
raise ValueError("in_files must either be a sequence of filenames "
"or a string")
def which(program):
""" returns the path to an executable or None if it can't be found"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept
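# Sketch of reservoir_sample: one pass over a large stream with fixed
# memory, optionally transforming items as they are kept
# (_demo_reservoir_sample and the read names are illustrative only).
def _demo_reservoir_sample():
    lines = ("read_%d" % i for i in xrange(10000))
    # five uniformly chosen, upper-cased read names
    return reservoir_sample(lines, 5, item_parser=lambda line: line.upper())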
def compose(f, g):
return lambda x: f(g(x))
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d
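# Sketch of compose and dictapply: apply a composed transform to every
# non-dict value of a nested dictionary (_demo_dictapply is illustrative).
def _demo_dictapply():
    to_str_len = compose(len, str)
    config = {"cores": 16, "resources": {"memory": 4000}}
    # -> {"cores": 2, "resources": {"memory": 4}}
    return dictapply(config, to_str_len)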
def R_sitelib():
"""Retrieve the R site-library installed with the bcbio installer.
"""
from bcbio import install
return os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
"lib", "R", "site-library")
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
cmd = """Rscript -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError, e:
return None
for line in output.split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None
def is_gzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".gz", "gzip"]
def is_bzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".bz2", "bzip2"]
def open_possible_gzip(fname, flag="r"):
if is_gzipped(fname):
if "b" not in flag:
flag += "b"
return gzip.open(fname, flag)
else:
return open(fname, flag)
def filter_missing(xs):
"""
remove items from a list if they evaluate to False
"""
return filter(lambda x: x, xs)
def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df
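# Sketch of filter_missing and rbind on small pandas frames (pandas is only
# imported inside the demo; _demo_rbind is illustrative).
def _demo_rbind():
    import pandas as pd
    dfs = [pd.DataFrame({"sample": ["a"], "depth": [10]}),
           pd.DataFrame({"sample": ["b"], "depth": [20]})]
    combined = rbind(dfs)
    # two rows stacked, like R's rbind
    present = filter_missing(["x", None, "", "y"])
    # -> ["x", "y"]
    return combined, present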
| mit |
Fireblend/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
marionleborgne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
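# Quick sketch of the parser above; the returned triples are the
# (linestyle, marker, color) values consumed by _process_plot_var_args
# (_demo_process_plot_format is illustrative only).
def _demo_process_plot_format():
    assert _process_plot_format('r--') == ('--', 'None', 'r')
    assert _process_plot_format('ko') == ('None', 'o', 'k')
    return _process_plot_format('.b')  # ('None', '.', 'b')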
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
        # in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
        'Return a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
        # if no data is set currently, the bbox will ignore its
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
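    # Editor's usage sketch (hedged), assuming an Axes instance `ax` created
    # elsewhere; shows the public autoscaling knobs defined above:
    #
    #     ax.set_autoscale_on(True)         # let plot commands rescale the view
    #     ax.autoscale_view(tight=True)     # snap view limits to the data limits
    #     ax.autoscale_view(scaley=False)   # rescale x only, leave y untouched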
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
        *kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
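    # Editor's usage sketch (hedged), assuming an Axes instance `ax`:
    #
    #     ax.grid(True)                                    # grid on
    #     ax.grid(color='r', linestyle='-', linewidth=2)   # kwargs imply b=True
    #     ax.grid()                                        # no args: toggle state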
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
                raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
          *xmin*: scalar
            the min of the xlim
          *xmax*: scalar
            the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
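    # Editor's usage sketch (hedged), assuming an Axes instance `ax`:
    #
    #     ax.set_xlim(0, 10)            # explicit limits
    #     ax.set_xlim(xmax=5)           # change only the upper bound
    #     left, right = ax.get_xlim()   # current view interval
    #     ax.set_xlim(right, left)      # reversed order inverts the axis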
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
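    # Editor's usage sketch (hedged), assuming an Axes instance `ax`:
    #
    #     ax.set_xscale('log', basex=2)   # logarithmic x axis with base-2 ticks
    #     ax.set_yscale('linear')         # back to a linear y axis
    #     ax.get_xscale()                 # -> 'log'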
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
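    # Editor's usage sketch (hedged), assuming an Axes instance `ax`:
    #
    #     ax.set_xticks([0, 1, 2, 3])
    #     ax.set_xticklabels(['zero', 'one', 'two', 'three'], rotation=45)
    #     labels = ax.get_xticklabels()   # list of Text instances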
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
        'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
        'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
        'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the datalim
            # has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
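    # Editor's usage sketch (hedged): plot_date below calls xaxis_date for you;
    # call it directly when the dates were plotted with plain plot().  `ax` and
    # the concrete dates are assumptions for illustration:
    #
    #     import datetime
    #     days = [datetime.date(2009, 1, d) for d in range(1, 11)]
    #     ax.plot(mdates.date2num(days), range(10))
    #     ax.xaxis_date()    # install a date-aware locator and formatter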
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
            # We should probably use a better proxy for whether the datalim
            # has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
        formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
        return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
        c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
        Register observers to be notified when certain events occur.  The
        callback function must have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
        The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
            artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the defaults can be overridden and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the defaults can be overridden and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
                for information on how the defaults can be overridden and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
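    # Editor's usage sketch (hedged), assuming an Axes instance `ax`:
    #
    #     ax.text(2.0, 3.0, 'data coords')   # default transform is transData
    #     ax.text(0.5, 0.5, 'centered', transform=ax.transAxes,
    #             horizontalalignment='center', verticalalignment='center')
    #     ax.text(1.0, 1.0, 'boxed', bbox=dict(facecolor='red', alpha=0.5))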
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
        * draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
        * draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
        if len(xmin)!=len(y):
            raise ValueError('xmin and y are unequal sized sequences')
        if len(xmax)!=len(y):
            raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
          vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
        if len(ymin)!=len(x):
            raise ValueError('ymin and x are unequal sized sequences')
        if len(ymax)!=len(x):
            raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
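    # Editor's usage sketch (hedged), assuming an Axes instance `ax` and numpy
    # imported as `np` in the caller's namespace:
    #
    #     x = np.arange(5)
    #     ax.vlines(x, [0], [1, 2, 1, 3, 2], colors='b', linestyles='dashed')
    #     ax.hlines([0.5, 1.5], xmin=0, xmax=4, colors='k')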
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
            plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
        marker, linestyle, and marker face color with::
            plot(x, y, color='green', linestyle='dashed', marker='o',
                 markerfacecolor='blue', markersize=12)
        See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
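    # Editor's usage sketch (hedged): x values are float days as produced by
    # matplotlib.dates helpers; `ax` and the concrete dates are assumptions:
    #
    #     import datetime
    #     d0 = datetime.datetime(2009, 1, 1)
    #     xs = mdates.drange(d0, d0 + datetime.timedelta(days=10),
    #                        datetime.timedelta(days=1))
    #     ax.plot_date(xs, range(len(xs)), fmt='r-', tz='UTC')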
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
        :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
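    # Editor's usage sketch (hedged), assuming `ax` and numpy as `np`:
    #
    #     t = np.arange(0.01, 20.0, 0.01)
    #     ax.semilogx(t, np.sin(2 * np.pi * t))            # log x, linear y
    #     ax.loglog(t, 20 * np.exp(-t / 10.0), basex=2)    # log-log, base-2 x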
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
        detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
        If *usevlines* is *True*, the return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
        lag. *x* and *y* are detrended by the *detrend* callable
        (default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
        If *usevlines* is *True*, the return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
        :func:`~matplotlib.pyplot.xcorr` above, and
        :func:`~matplotlib.pyplot.acorr` below.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
            raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
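    # Editor's usage sketch (hedged), assuming `ax` and numpy as `np`:
    #
    #     x, y = np.random.randn(2, 100)
    #     lags, c, linecol, b = ax.xcorr(x, y, usevlines=True, maxlags=50)
    #     lags, c, line, _ = ax.acorr(x, normed=True, maxlags=20)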
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
        If none of these locations are suitable, loc can be a 2-tuple
giving x,y in axes coords, ie::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
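        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            x = np.arange(10)
            ax.step(x, np.sin(x), where='post')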
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
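        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            left = np.arange(5)          # left edges of the bars
            height = [3, 5, 2, 6, 4]     # bar heights
            ax.bar(left, height, width=0.8, color='g', yerr=0.5)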
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
            raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
        # proper input validation: raise ValueError rather than using
        # assert, so the checks are not stripped under ``python -O``
        if len(left) != nbars:
            raise ValueError(
                "bar() argument 'left' must be len(%s) or scalar" % nbars)
        if len(height) != nbars:
            raise ValueError(
                "bar() argument 'height' must be len(%s) or scalar" % nbars)
        if len(width) != nbars:
            raise ValueError(
                "bar() argument 'width' must be len(%s) or scalar" % nbars)
        if len(bottom) != nbars:
            raise ValueError(
                "bar() argument 'bottom' must be len(%s) or scalar" % nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
            raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
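        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            bottom = np.arange(4)        # vertical positions of the bars
            width = [2, 5, 3, 4]         # bar lengths
            ax.barh(bottom, width, height=0.5, color='c')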
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
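        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance)::

            # two bars spanning x = 10..60 and x = 100..120, both filling
            # the y range 20..29
            ax.broken_barh([(10, 50), (100, 20)], (20, 9),
                           facecolors=('blue', 'red'))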
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
        using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
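        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            x = np.linspace(0.1, 2*np.pi, 10)
            markerline, stemlines, baseline = ax.stem(
                x, np.cos(x), linefmt='b-', markerfmt='go', basefmt='r-')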
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
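        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance)::

            fracs = [15, 30, 45, 10]
            labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
            ax.pie(fracs, explode=(0, 0.05, 0, 0), labels=labels,
                   autopct='%1.1f%%', shadow=True)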
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
        *xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
          If a scalar number, a length-N array-like object, or an Nx1
          array-like object, errorbars are drawn at +/- value.
          If a 2xN array-like (a sequence of two length-N sequences),
          errorbars are drawn at -row1 and +row2.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
          the linewidth of the errorbar lines. If *None*, the *linewidth*
          (or *lw*) value given for the markers is used.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
        and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
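        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            x = np.arange(0.1, 4, 0.5)
            y = np.exp(-x)
            # symmetric vertical errors, constant horizontal errors
            ax.errorbar(x, y, yerr=0.1*y, xerr=0.05, fmt='o', ecolor='g')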
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
        a function of the interquartile range (IQR). They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
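        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            data = [np.random.normal(0, std, 100) for std in (1, 2, 3)]
            result = ax.boxplot(data, notch=0, sym='r+', vert=1, whis=1.5)
            # result['medians'], result['whiskers'], ... are Line2D lists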
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
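        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            x, y = np.random.rand(2, 50)
            sizes = 100*np.random.rand(50)    # marker areas in points^2
            colors = np.random.rand(50)       # floats mapped through cmap
            ax.scatter(x, y, s=sizes, c=colors, marker='o', alpha=0.75)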
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
        (the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
        *yscale*: [ 'linear' | 'log' ]
            Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
        *linewidths*: [ None | scalar | sequence ]
            If *None*, defaults to rc lines.linewidth. A scalar or a
            sequence of floats is accepted, as for any
            :class:`~matplotlib.collections.Collection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
        :meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
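        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            x = np.random.standard_normal(10000)
            y = 2 + 3*x + 4*np.random.standard_normal(10000)
            pc = ax.hexbin(x, y, gridsize=30, bins='log')
            counts = pc.get_array()   # log10(count+1) value per hexagon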
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
        elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
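        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance; *head_width*, *head_length*, *fc* and *ec*
        are :class:`~matplotlib.patches.FancyArrow`/patch keywords)::

            ax.arrow(0.2, 0.2, 0.5, 0.5, head_width=0.05,
                     head_length=0.1, fc='k', ec='k')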
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
        vertices at *x*, *y* in blue::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
        *x*
          an N-length np array of the x data
        *y1*
          an N-length np array (or a scalar) of the y data
        *y2*
          an N-length np array (or a scalar) of the y data
        *where*
          if *None*, default is to fill between everywhere. If not *None*,
          it is an N-length numpy boolean array and the fill will
          only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
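        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``; *facecolor*
        is passed through to the :class:`PolyCollection`)::

            x = np.arange(0.0, 2.0, 0.01)
            y1 = np.sin(2*np.pi*x)
            y2 = 1.2*np.sin(4*np.pi*x)
            # shade only where y2 is at or above y1
            ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green')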
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
          *None*, a default :class:`~matplotlib.colors.Normalize` is used. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
          Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
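        A minimal usage sketch (illustrative only; assumes ``ax`` is an
        existing axes instance and numpy is imported as ``np``)::

            Z = np.random.rand(20, 30)   # MxN luminance data
            im = ax.imshow(Z, interpolation='nearest', origin='lower',
                           extent=(0, 3, 0, 2), vmin=0.0, vmax=1.0)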
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
        if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
        if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
        Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
          * shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
        Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collection.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
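# A minimal usage sketch for pcolor, assuming pyplot is used to create the
# axes; C has one fewer row and column than X and Y, per the grid convention
# described in the docstring above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   X, Y = np.meshgrid(np.arange(6), np.arange(4))
#   C = np.random.rand(3, 5)
#   coll = ax.pcolor(X, Y, C, edgecolors='k')
#   fig.colorbar(coll)
#   plt.show()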
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
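# pcolormesh takes the same X, Y, C arguments as pcolor but builds a QuadMesh,
# which renders much faster for large grids; a minimal sketch assuming pyplot.
# The last row and column of C are ignored, as documented above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   X, Y = np.meshgrid(np.linspace(0., 1., 101), np.linspace(0., 1., 81))
#   C = np.sin(10 * X) * np.cos(10 * Y)
#   ax.pcolormesh(X, Y, C)
#   plt.show()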
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
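# A minimal sketch of the three pcolorfast call styles described in the
# docstring, assuming pyplot creates the axes; the shapes follow the
# nr, nc conventions above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   C = np.random.rand(20, 30)
#   ax.pcolorfast(C)                        # image style on a unit grid
#   ax.pcolorfast([0., 3.], [0., 2.], C)    # image style with x/y ranges
#   x = np.linspace(0., 3., 31) ** 2        # nonuniform boundaries -> pcolorimage
#   y = np.linspace(0., 2., 21)
#   ax.pcolorfast(x, y, C)
#   plt.show()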
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
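# contour and contourf share the ContourSet machinery, and clabel annotates
# the resulting line levels; a minimal sketch assuming pyplot creates the axes.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   X, Y = np.meshgrid(np.linspace(-2., 2., 100), np.linspace(-2., 2., 100))
#   Z = np.exp(-(X**2 + Y**2))
#   CS = ax.contour(X, Y, Z, 6)      # 6 contour lines
#   ax.clabel(CS, fontsize=8)
#   ax.contourf(X, Y, Z, 6)          # filled version
#   plt.show()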
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
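# A minimal sketch of adding a table below the axes, assuming pyplot; the cell
# text and row/column labels are arbitrary placeholders.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   ax.table(cellText=[['1', '2'], ['3', '4']],
#            rowLabels=['row A', 'row B'],
#            colLabels=['col 1', 'col 2'],
#            loc='bottom')
#   plt.show()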
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y-axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x-axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
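# A minimal sketch of twinx: two y scales sharing one x-axis, assuming pyplot
# creates the first axes (twiny is the analogous construction for the x-axis).
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax1 = fig.add_subplot(111)
#   t = np.linspace(0., 10., 200)
#   ax1.plot(t, np.exp(t), 'b-')
#   ax2 = ax1.twinx()            # same x limits, independent y-axis on the right
#   ax2.plot(t, np.sin(t), 'r-')
#   plt.show()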
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note that if *bins*
is an integer, *bins* + 1 bin edges will be returned,
compatible with the semantics of :func:`numpy.histogram`
with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError, 'Cannot use the provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
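# A minimal sketch of hist, assuming pyplot creates the axes; `normed` and
# `rwidth` follow the semantics documented above for this version.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   data = 10 + 2 * np.random.randn(1000)
#   n, bins, patches = ax.hist(data, bins=30, normed=True, rwidth=0.9)
#   plt.show()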
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
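# A minimal sketch of psd on a noisy sinusoid, assuming pyplot; the Fs and
# NFFT values are arbitrary choices for the example.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Fs = 1000.
#   t = np.arange(0., 1., 1. / Fs)
#   x = np.sin(2 * np.pi * 100 * t) + 0.5 * np.random.randn(len(t))
#   Pxx, freqs = ax.psd(x, NFFT=256, Fs=Fs)
#   plt.show()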
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
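# A minimal sketch of csd for two related signals, assuming pyplot creates
# the axes; the frequencies and noise levels are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Fs = 1000.
#   t = np.arange(0., 1., 1. / Fs)
#   x = np.sin(2 * np.pi * 100 * t) + 0.1 * np.random.randn(len(t))
#   y = np.sin(2 * np.pi * 100 * t + 0.5) + 0.1 * np.random.randn(len(t))
#   Pxy, freqs = ax.csd(x, y, NFFT=256, Fs=Fs)
#   plt.show()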
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
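# A minimal sketch of cohere, assuming pyplot; two noisy copies of the same
# sinusoid give coherence near 1 at the signal frequency.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Fs = 500.
#   t = np.arange(0., 2., 1. / Fs)
#   s = np.sin(2 * np.pi * 50 * t)
#   x = s + 0.1 * np.random.randn(len(t))
#   y = s + 0.1 * np.random.randn(len(t))
#   Cxy, f = ax.cohere(x, y, NFFT=256, Fs=Fs)
#   plt.show()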
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(freqs) x len(bins) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
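# A minimal sketch of specgram on a signal whose frequency rises with time,
# assuming pyplot creates the axes; Fs, NFFT and noverlap are arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Fs = 1000.
#   t = np.arange(0., 5., 1. / Fs)
#   x = np.sin(2 * np.pi * (50 + 20 * t) * t)
#   Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=Fs, noverlap=128)
#   plt.show()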
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
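# A minimal sketch of both spy styles described above, assuming pyplot; the
# mostly-zero test matrix is arbitrary.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   Z = np.random.rand(20, 20)
#   Z[Z < 0.9] = 0.                        # mostly zeros
#   ax1 = fig.add_subplot(121)
#   ax1.spy(Z)                             # image style
#   ax2 = fig.add_subplot(122)
#   ax2.spy(Z, marker='.', markersize=5)   # marker style
#   plt.show()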
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
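# A minimal sketch of matshow, assuming pyplot; the matrix is displayed the
# way it would be printed, first row at the top.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   Z = np.diag(np.arange(1, 11))
#   im = ax.matshow(Z)
#   fig.colorbar(im)
#   plt.show()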
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
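# The classes produced by subplot_class_factory are what figure subplot
# creation hands back; a minimal usage sketch, assuming pyplot provides the
# figure and that add_subplot goes through this factory.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(2, 2, 3)      # lower-left cell of a 2x2 grid
#   assert isinstance(ax, SubplotBase)
#   plt.show()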
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| agpl-3.0 |